-rw-r--r--  Makefile | 2
-rw-r--r--  contrib/cirrus/CIModes.md | 2
-rw-r--r--  go.mod | 12
-rw-r--r--  go.sum | 490
-rw-r--r--  test/apiv2/12-imagesMore.at | 2
-rwxr-xr-x  test/buildah-bud/apply-podman-deltas | 13
-rwxr-xr-x  test/buildah-bud/run-buildah-bud-tests | 9
-rw-r--r--  test/system/010-images.bats | 6
-rw-r--r--  test/system/120-load.bats | 1
-rw-r--r--  test/system/170-run-userns.bats | 4
-rw-r--r--  test/system/200-pod.bats | 2
-rw-r--r--  vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod | 2
-rw-r--r--  vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum | 4
-rw-r--r--  vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go | 67
-rw-r--r--  vendor/github.com/containerd/stargz-snapshotter/estargz/types.go | 3
-rw-r--r--  vendor/github.com/containers/buildah/.cirrus.yml | 73
-rw-r--r--  vendor/github.com/containers/buildah/Makefile | 10
-rw-r--r--  vendor/github.com/containers/buildah/add.go | 77
-rw-r--r--  vendor/github.com/containers/buildah/bind/mount.go | 39
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 7
-rw-r--r--  vendor/github.com/containers/buildah/buildah.spec.rpkg | 168
-rw-r--r--  vendor/github.com/containers/buildah/chroot/run.go | 102
-rw-r--r--  vendor/github.com/containers/buildah/chroot/seccomp.go | 26
-rw-r--r--  vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/chroot/selinux.go | 6
-rw-r--r--  vendor/github.com/containers/buildah/chroot/selinux_unsupported.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/chroot/unsupported.go | 7
-rw-r--r--  vendor/github.com/containers/buildah/commit.go | 47
-rw-r--r--  vendor/github.com/containers/buildah/config.go | 18
-rw-r--r--  vendor/github.com/containers/buildah/copier/copier.go | 168
-rw-r--r--  vendor/github.com/containers/buildah/copier/mknod_int.go | 12
-rw-r--r--  vendor/github.com/containers/buildah/copier/mknod_uint64.go | 12
-rw-r--r--  vendor/github.com/containers/buildah/copier/syscall_unix.go | 13
-rw-r--r--  vendor/github.com/containers/buildah/copier/xattrs.go | 20
-rw-r--r--  vendor/github.com/containers/buildah/define/build.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/define/mount_freebsd.go | 17
-rw-r--r--  vendor/github.com/containers/buildah/define/mount_linux.go | 17
-rw-r--r--  vendor/github.com/containers/buildah/define/types.go | 24
-rw-r--r--  vendor/github.com/containers/buildah/delete.go | 6
-rw-r--r--  vendor/github.com/containers/buildah/digester.go | 14
-rw-r--r--  vendor/github.com/containers/buildah/go.mod | 55
-rw-r--r--  vendor/github.com/containers/buildah/go.sum | 582
-rw-r--r--  vendor/github.com/containers/buildah/image.go | 58
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go | 81
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/executor.go | 136
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/stage_executor.go | 234
-rw-r--r--  vendor/github.com/containers/buildah/import.go | 31
-rw-r--r--  vendor/github.com/containers/buildah/info.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/internal/parse/parse.go | 79
-rw-r--r--  vendor/github.com/containers/buildah/internal/util/util.go | 16
-rw-r--r--  vendor/github.com/containers/buildah/mount.go | 16
-rw-r--r--  vendor/github.com/containers/buildah/new.go | 45
-rw-r--r--  vendor/github.com/containers/buildah/pkg/chrootuser/user.go | 12
-rw-r--r--  vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go | 5
-rw-r--r--  vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go (renamed from vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go) | 3
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/build.go | 24
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go | 8
-rw-r--r--  vendor/github.com/containers/buildah/pkg/jail/jail.go | 180
-rw-r--r--  vendor/github.com/containers/buildah/pkg/jail/jail_int32.go | 20
-rw-r--r--  vendor/github.com/containers/buildah/pkg/jail/jail_int64.go | 19
-rw-r--r--  vendor/github.com/containers/buildah/pkg/overlay/overlay.go | 31
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse.go | 164
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse_unix.go | 9
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go | 6
-rw-r--r--  vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go | 6
-rw-r--r--  vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go | 6
-rw-r--r--  vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go | 7
-rw-r--r--  vendor/github.com/containers/buildah/pkg/util/util.go | 9
-rw-r--r--  vendor/github.com/containers/buildah/pull.go | 5
-rw-r--r--  vendor/github.com/containers/buildah/push.go | 5
-rw-r--r--  vendor/github.com/containers/buildah/run_common.go | 1881
-rw-r--r--  vendor/github.com/containers/buildah/run_freebsd.go | 548
-rw-r--r--  vendor/github.com/containers/buildah/run_linux.go | 1974
-rw-r--r--  vendor/github.com/containers/buildah/run_unix.go | 6
-rw-r--r--  vendor/github.com/containers/buildah/run_unsupported.go | 6
-rw-r--r--  vendor/github.com/containers/buildah/seccomp.go | 9
-rw-r--r--  vendor/github.com/containers/buildah/selinux.go | 5
-rw-r--r--  vendor/github.com/containers/buildah/unmount.go | 8
-rw-r--r--  vendor/github.com/containers/buildah/util.go | 15
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go | 43
-rw-r--r--  vendor/github.com/containers/buildah/util/util_uint64.go | 3
-rw-r--r--  vendor/github.com/containers/buildah/util/util_unix.go | 3
-rw-r--r--  vendor/github.com/containers/common/libimage/copier.go | 13
-rw-r--r--  vendor/github.com/containers/common/libimage/filters.go | 13
-rw-r--r--  vendor/github.com/containers/common/libimage/image.go | 49
-rw-r--r--  vendor/github.com/containers/common/libimage/image_config.go | 31
-rw-r--r--  vendor/github.com/containers/common/libimage/import.go | 4
-rw-r--r--  vendor/github.com/containers/common/libimage/manifest_list.go | 31
-rw-r--r--  vendor/github.com/containers/common/libimage/manifests/manifests.go | 53
-rw-r--r--  vendor/github.com/containers/common/libimage/normalize.go | 6
-rw-r--r--  vendor/github.com/containers/common/libimage/pull.go | 36
-rw-r--r--  vendor/github.com/containers/common/libimage/runtime.go | 30
-rw-r--r--  vendor/github.com/containers/common/libimage/save.go | 17
-rw-r--r--  vendor/github.com/containers/common/libimage/search.go | 13
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go | 82
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/cni_types.go | 16
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/config.go | 15
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/network.go | 11
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/run.go | 12
-rw-r--r--  vendor/github.com/containers/common/libnetwork/internal/util/bridge.go | 6
-rw-r--r--  vendor/github.com/containers/common/libnetwork/internal/util/create.go | 7
-rw-r--r--  vendor/github.com/containers/common/libnetwork/internal/util/ip.go | 6
-rw-r--r--  vendor/github.com/containers/common/libnetwork/internal/util/parse.go | 7
-rw-r--r--  vendor/github.com/containers/common/libnetwork/internal/util/validate.go | 17
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/config.go | 39
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/exec.go | 3
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/ipam.go | 3
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/network.go | 20
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/run.go | 5
-rw-r--r--  vendor/github.com/containers/common/libnetwork/network/interface.go | 5
-rw-r--r--  vendor/github.com/containers/common/libnetwork/types/const.go | 6
-rw-r--r--  vendor/github.com/containers/common/libnetwork/types/define.go | 6
-rw-r--r--  vendor/github.com/containers/common/libnetwork/util/filters.go | 4
-rw-r--r--  vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go | 63
-rw-r--r--  vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go | 5
-rw-r--r--  vendor/github.com/containers/common/pkg/auth/auth.go | 39
-rw-r--r--  vendor/github.com/containers/common/pkg/capabilities/capabilities.go | 11
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/blkio.go | 8
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go | 9
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/cgroups.go | 26
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go | 24
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go | 10
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/cpu.go | 8
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go | 8
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go | 97
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/utils.go | 15
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/utils_linux.go | 2
-rw-r--r--  vendor/github.com/containers/common/pkg/chown/chown_unix.go | 13
-rw-r--r--  vendor/github.com/containers/common/pkg/chown/chown_windows.go | 2
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config.go | 9
-rw-r--r--  vendor/github.com/containers/common/pkg/config/containers.conf | 7
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/config/pull_policy.go | 10
-rw-r--r--  vendor/github.com/containers/common/pkg/filters/filters.go | 3
-rw-r--r--  vendor/github.com/containers/common/pkg/hooks/0.1.0/hook.go | 88
-rw-r--r--  vendor/github.com/containers/common/pkg/hooks/1.0.0/hook.go | 89
-rw-r--r--  vendor/github.com/containers/common/pkg/hooks/1.0.0/when.go | 96
-rw-r--r--  vendor/github.com/containers/common/pkg/hooks/README.md | 22
-rw-r--r--  vendor/github.com/containers/common/pkg/hooks/exec/exec.go | 69
-rw-r--r--  vendor/github.com/containers/common/pkg/hooks/exec/runtimeconfigfilter.go | 72
-rw-r--r--  vendor/github.com/containers/common/pkg/hooks/hooks.go | 146
-rw-r--r--  vendor/github.com/containers/common/pkg/hooks/monitor.go | 66
-rw-r--r--  vendor/github.com/containers/common/pkg/hooks/read.go | 101
-rw-r--r--  vendor/github.com/containers/common/pkg/hooks/version.go | 6
-rw-r--r--  vendor/github.com/containers/common/pkg/manifests/manifests.go | 27
-rw-r--r--  vendor/github.com/containers/common/pkg/parse/parse.go | 36
-rw-r--r--  vendor/github.com/containers/common/pkg/parse/parse_unix.go | 8
-rw-r--r--  vendor/github.com/containers/common/pkg/retry/retry.go | 3
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/conversion.go | 17
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/filter.go | 38
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/validate.go | 9
-rw-r--r--  vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go | 11
-rw-r--r--  vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go | 14
-rw-r--r--  vendor/github.com/containers/common/pkg/secrets/secrets.go | 21
-rw-r--r--  vendor/github.com/containers/common/pkg/secrets/secretsdb.go | 18
-rw-r--r--  vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go | 7
-rw-r--r--  vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go | 41
-rw-r--r--  vendor/github.com/containers/common/pkg/supplemented/supplemented.go | 48
-rw-r--r--  vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go | 7
-rw-r--r--  vendor/github.com/containers/common/pkg/util/util_supported.go | 4
-rw-r--r--  vendor/github.com/containers/common/pkg/util/util_windows.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/copy/blob.go | 17
-rw-r--r--  vendor/github.com/containers/image/v5/copy/compression.go | 7
-rw-r--r--  vendor/github.com/containers/image/v5/copy/copy.go | 175
-rw-r--r--  vendor/github.com/containers/image/v5/copy/digesting_reader.go | 10
-rw-r--r--  vendor/github.com/containers/image/v5/copy/encryption.go | 8
-rw-r--r--  vendor/github.com/containers/image/v5/copy/manifest.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/copy/sign.go | 40
-rw-r--r--  vendor/github.com/containers/image/v5/directory/directory_dest.go | 125
-rw-r--r--  vendor/github.com/containers/image/v5/directory/directory_src.go | 53
-rw-r--r--  vendor/github.com/containers/image/v5/directory/directory_transport.go | 8
-rw-r--r--  vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go | 7
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/dest.go | 14
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/reader.go | 17
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/src.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/transport.go | 20
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/writer.go | 7
-rw-r--r--  vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go | 16
-rw-r--r--  vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go | 11
-rw-r--r--  vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go | 14
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_client.go | 209
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image.go | 24
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_dest.go | 378
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_src.go | 265
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_transport.go | 10
-rw-r--r--  vendor/github.com/containers/image/v5/docker/errors.go | 3
-rw-r--r--  vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go | 110
-rw-r--r--  vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go | 29
-rw-r--r--  vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go | 69
-rw-r--r--  vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go | 22
-rw-r--r--  vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go | 7
-rw-r--r--  vendor/github.com/containers/image/v5/docker/registries_d.go (renamed from vendor/github.com/containers/image/v5/docker/lookaside.go) | 166
-rw-r--r--  vendor/github.com/containers/image/v5/internal/image/docker_list.go | 12
-rw-r--r--  vendor/github.com/containers/image/v5/internal/image/docker_schema1.go | 12
-rw-r--r--  vendor/github.com/containers/image/v5/internal/image/docker_schema2.go | 16
-rw-r--r--  vendor/github.com/containers/image/v5/internal/image/manifest.go | 3
-rw-r--r--  vendor/github.com/containers/image/v5/internal/image/memory.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/internal/image/oci.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/internal/image/oci_index.go | 12
-rw-r--r--  vendor/github.com/containers/image/v5/internal/image/unparsed.go | 34
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go | 77
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go | 72
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go | 52
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go | 50
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go | 25
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go | 52
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go | 54
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go | 23
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go | 27
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go | 19
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go | 52
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go | 25
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go | 49
-rw-r--r--  vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/internal/private/private.go | 64
-rw-r--r--  vendor/github.com/containers/image/v5/internal/signature/signature.go | 111
-rw-r--r--  vendor/github.com/containers/image/v5/internal/signature/sigstore.go | 84
-rw-r--r--  vendor/github.com/containers/image/v5/internal/signature/simple.go | 27
-rw-r--r--  vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go | 38
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/docker_schema1.go | 27
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/docker_schema2.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go | 17
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/oci.go | 11
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/oci_index.go | 17
-rw-r--r--  vendor/github.com/containers/image/v5/oci/archive/oci_dest.go | 69
-rw-r--r--  vendor/github.com/containers/image/v5/oci/archive/oci_src.go | 53
-rw-r--r--  vendor/github.com/containers/image/v5/oci/archive/oci_transport.go | 12
-rw-r--r--  vendor/github.com/containers/image/v5/oci/internal/oci_util.go | 15
-rw-r--r--  vendor/github.com/containers/image/v5/oci/layout/oci_dest.go | 114
-rw-r--r--  vendor/github.com/containers/image/v5/oci/layout/oci_src.go | 62
-rw-r--r--  vendor/github.com/containers/image/v5/oci/layout/oci_transport.go | 8
-rw-r--r--  vendor/github.com/containers/image/v5/openshift/openshift-copies.go | 36
-rw-r--r--  vendor/github.com/containers/image/v5/openshift/openshift.go | 368
-rw-r--r--  vendor/github.com/containers/image/v5/openshift/openshift_dest.go | 247
-rw-r--r--  vendor/github.com/containers/image/v5/openshift/openshift_src.go | 173
-rw-r--r--  vendor/github.com/containers/image/v5/openshift/openshift_transport.go | 12
-rw-r--r--  vendor/github.com/containers/image/v5/ostree/ostree_dest.go | 110
-rw-r--r--  vendor/github.com/containers/image/v5/ostree/ostree_src.go | 50
-rw-r--r--  vendor/github.com/containers/image/v5/ostree/ostree_transport.go | 12
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go | 462
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/blobcache/dest.go | 294
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/blobcache/src.go | 270
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/compression/compression.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config.go | 68
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go | 32
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go | 30
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go | 13
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go | 8
-rw-r--r--  vendor/github.com/containers/image/v5/sif/src.go | 50
-rw-r--r--  vendor/github.com/containers/image/v5/signature/docker.go | 11
-rw-r--r--  vendor/github.com/containers/image/v5/signature/internal/errors.go | 15
-rw-r--r--  vendor/github.com/containers/image/v5/signature/internal/json.go (renamed from vendor/github.com/containers/image/v5/signature/json.go) | 36
-rw-r--r--  vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go | 201
-rw-r--r--  vendor/github.com/containers/image/v5/signature/mechanism.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go | 19
-rw-r--r--  vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go | 20
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_config.go | 190
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_eval.go | 23
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go | 50
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go | 140
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_eval_simple.go | 10
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_reference_match.go | 16
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_types.go | 29
-rw-r--r--  vendor/github.com/containers/image/v5/signature/sigstore/copied.go | 70
-rw-r--r--  vendor/github.com/containers/image/v5/signature/sigstore/sign.go | 65
-rw-r--r--  vendor/github.com/containers/image/v5/signature/simple.go (renamed from vendor/github.com/containers/image/v5/signature/signature.go) | 37
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_dest.go | 924
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_image.go | 1297
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_reference.go | 10
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_src.go | 380
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_transport.go | 12
-rw-r--r--  vendor/github.com/containers/image/v5/tarball/tarball_src.go | 41
-rw-r--r--  vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go | 6
-rw-r--r--  vendor/github.com/containers/storage/containers.go | 13
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/aufs.go | 28
-rw-r--r--  vendor/github.com/containers/storage/drivers/btrfs/btrfs.go | 37
-rw-r--r--  vendor/github.com/containers/storage/drivers/chown.go | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/chown_darwin.go | 12
-rw-r--r--  vendor/github.com/containers/storage/drivers/chown_unix.go | 12
-rw-r--r--  vendor/github.com/containers/storage/drivers/chroot_unix.go | 5
-rw-r--r--  vendor/github.com/containers/storage/drivers/chroot_windows.go | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/copy/copy_linux.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/device_setup.go | 44
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/deviceset.go | 76
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver.go | 13
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/check.go | 26
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/mount.go | 9
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 107
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/randomid.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/quota/projectquota.go | 28
-rw-r--r--  vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/windows/windows.go | 21
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs.go | 36
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go | 7
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go | 5
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 7
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 16
-rw-r--r--  vendor/github.com/containers/storage/idset.go | 6
-rw-r--r--  vendor/github.com/containers/storage/images.go | 79
-rw-r--r--  vendor/github.com/containers/storage/layers.go | 88
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go | 22
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_windows.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/diff.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive.go | 9
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go | 21
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go | 18
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go | 11
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/cache_linux.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/compression.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/storage_linux.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/fileutils/fileutils.go | 8
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools.go | 17
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/parser.go | 8
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go | 32
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/utils_unix.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/lockfile/lockfile.go | 9
-rw-r--r--  vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/flags.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/parsers.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/path_windows.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/rm.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/syscall_unix.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/unshare/unshare.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go | 60
-rw-r--r--  vendor/github.com/containers/storage/store.go | 95
-rw-r--r--  vendor/github.com/containers/storage/types/idmappings.go | 7
-rw-r--r--  vendor/github.com/containers/storage/types/options.go | 80
-rw-r--r--  vendor/github.com/containers/storage/types/utils.go | 6
-rw-r--r--  vendor/github.com/containers/storage/userns.go | 8
-rw-r--r--  vendor/github.com/coreos/go-systemd/activation/listeners.go | 4
-rw-r--r--  vendor/github.com/golang/protobuf/jsonpb/decode.go | 524
-rw-r--r--  vendor/github.com/golang/protobuf/jsonpb/encode.go | 559
-rw-r--r--  vendor/github.com/golang/protobuf/jsonpb/json.go | 69
-rw-r--r--  vendor/github.com/google/go-containerregistry/LICENSE | 202
-rw-r--r--  vendor/github.com/google/go-containerregistry/pkg/name/README.md | 3
-rw-r--r--  vendor/github.com/google/go-containerregistry/pkg/name/check.go | 43
-rw-r--r--  vendor/github.com/google/go-containerregistry/pkg/name/digest.go | 96
-rw-r--r--  vendor/github.com/google/go-containerregistry/pkg/name/doc.go | 42
-rw-r--r--  vendor/github.com/google/go-containerregistry/pkg/name/errors.go | 42
-rw-r--r--  vendor/github.com/google/go-containerregistry/pkg/name/options.go | 83
-rw-r--r--  vendor/github.com/google/go-containerregistry/pkg/name/ref.go | 75
-rw-r--r--  vendor/github.com/google/go-containerregistry/pkg/name/registry.go | 136
-rw-r--r--  vendor/github.com/google/go-containerregistry/pkg/name/repository.go | 121
-rw-r--r--  vendor/github.com/google/go-containerregistry/pkg/name/tag.go | 108
-rw-r--r--  vendor/github.com/klauspost/compress/README.md | 21
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go | 68
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_code.go | 74
-rw-r--r--  vendor/github.com/klauspost/compress/flate/stateless.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.go | 10
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.s | 686
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_decoder.go | 63
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go | 64
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s | 127
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go | 72
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s | 1999
-rw-r--r--  vendor/github.com/letsencrypt/boulder/LICENSE.txt | 375
-rw-r--r--  vendor/github.com/letsencrypt/boulder/core/challenges.go | 27
-rw-r--r--  vendor/github.com/letsencrypt/boulder/core/interfaces.go | 14
-rw-r--r--  vendor/github.com/letsencrypt/boulder/core/objects.go | 536
-rw-r--r--  vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go | 1100
-rw-r--r--  vendor/github.com/letsencrypt/boulder/core/proto/core.proto | 95
-rw-r--r--  vendor/github.com/letsencrypt/boulder/core/util.go | 298
-rw-r--r--  vendor/github.com/letsencrypt/boulder/errors/errors.go | 150
-rw-r--r--  vendor/github.com/letsencrypt/boulder/features/featureflag_string.go | 45
-rw-r--r--  vendor/github.com/letsencrypt/boulder/features/features.go | 158
-rw-r--r--  vendor/github.com/letsencrypt/boulder/goodkey/blocked.go | 98
-rw-r--r--  vendor/github.com/letsencrypt/boulder/goodkey/good_key.go | 432
-rw-r--r--  vendor/github.com/letsencrypt/boulder/goodkey/weak.go | 66
-rw-r--r--  vendor/github.com/letsencrypt/boulder/identifier/identifier.go | 32
-rw-r--r--  vendor/github.com/letsencrypt/boulder/probs/probs.go | 349
-rw-r--r--  vendor/github.com/letsencrypt/boulder/revocation/reasons.go | 74
-rw-r--r--  vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go | 3449
-rw-r--r--  vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto | 272
-rw-r--r--  vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go | 1515
-rw-r--r--  vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go | 46
-rw-r--r--  vendor/github.com/openshift/imagebuilder/.travis.yml | 15
-rw-r--r--  vendor/github.com/openshift/imagebuilder/Makefile | 4
-rw-r--r--  vendor/github.com/openshift/imagebuilder/builder.go | 36
-rw-r--r--  vendor/github.com/openshift/imagebuilder/dispatchers.go | 33
-rw-r--r--  vendor/github.com/proglottis/gpgme/.appveyor.yml | 40
-rw-r--r--  vendor/github.com/proglottis/gpgme/.travis.yml | 32
-rw-r--r--  vendor/github.com/proglottis/gpgme/gpgme.go | 1
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/README.md | 2
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go | 38
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/collector.go | 8
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/counter.go | 8
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/go_collector.go | 494
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go | 107
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go | 408
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/histogram.go | 28
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go | 142
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go | 1
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/value.go | 6
-rw-r--r--  vendor/github.com/sigstore/sigstore/COPYRIGHT.txt | 14
-rw-r--r--  vendor/github.com/sigstore/sigstore/LICENSE | 202
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go | 170
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go | 31
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go | 96
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go | 144
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go | 174
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/doc.go | 17
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go | 244
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go | 197
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/message.go | 111
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/options.go | 57
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go | 36
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go | 35
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go | 50
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go | 49
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go | 41
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go | 32
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go | 58
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go | 40
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go | 104
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go | 25
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go | 225
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go | 260
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/signer.go | 89
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go | 69
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/util.go | 55
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go | 100
-rw-r--r--  vendor/github.com/theupdateframework/go-tuf/LICENSE | 27
-rw-r--r--  vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go | 226
-rw-r--r--  vendor/github.com/titanous/rocacheck/LICENSE | 22
-rw-r--r--  vendor/github.com/titanous/rocacheck/README.md | 7
-rw-r--r--  vendor/github.com/titanous/rocacheck/rocacheck.go | 52
-rw-r--r--  vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go | 173
-rw-r--r--  vendor/golang.org/x/crypto/ocsp/ocsp.go | 792
-rw-r--r--  vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go | 144
-rw-r--r--  vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go | 199
-rw-r--r--  vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go | 24
-rw-r--r--  vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s | 881
-rw-r--r--  vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go | 15
-rw-r--r--  vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go | 231
-rw-r--r--  vendor/golang.org/x/crypto/scrypt/scrypt.go | 212
-rw-r--r--  vendor/golang.org/x/crypto/sha3/doc.go | 62
-rw-r--r--  vendor/golang.org/x/crypto/sha3/hashes.go | 97
-rw-r--r--  vendor/golang.org/x/crypto/sha3/hashes_generic.go | 28
-rw-r--r--  vendor/golang.org/x/crypto/sha3/keccakf.go | 413
-rw-r--r--  vendor/golang.org/x/crypto/sha3/keccakf_amd64.go | 14
-rw-r--r--  vendor/golang.org/x/crypto/sha3/keccakf_amd64.s | 391
-rw-r--r--  vendor/golang.org/x/crypto/sha3/register.go | 19
-rw-r--r--  vendor/golang.org/x/crypto/sha3/sha3.go | 193
-rw-r--r--  vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 287
-rw-r--r--  vendor/golang.org/x/crypto/sha3/sha3_s390x.s | 34
-rw-r--r--  vendor/golang.org/x/crypto/sha3/shake.go | 173
-rw-r--r--  vendor/golang.org/x/crypto/sha3/shake_generic.go | 20
-rw-r--r--  vendor/golang.org/x/crypto/sha3/xor.go | 24
-rw-r--r--  vendor/golang.org/x/crypto/sha3/xor_generic.go | 28
-rw-r--r--  vendor/golang.org/x/crypto/sha3/xor_unaligned.go | 68
-rw-r--r--  vendor/golang.org/x/net/context/context.go | 6
-rw-r--r--  vendor/golang.org/x/net/context/go17.go | 10
-rw-r--r--  vendor/golang.org/x/net/context/pre_go17.go | 10
-rw-r--r--  vendor/golang.org/x/net/http/httpguts/httplex.go | 54
-rw-r--r--  vendor/golang.org/x/net/http2/client_conn_pool.go | 3
-rw-r--r--  vendor/golang.org/x/net/http2/errors.go | 2
-rw-r--r--  vendor/golang.org/x/net/http2/frame.go | 3
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/huffman.go | 87
-rw-r--r--  vendor/golang.org/x/net/http2/http2.go | 14
-rw-r--r--  vendor/golang.org/x/net/http2/server.go | 135
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go | 18
-rw-r--r--  vendor/golang.org/x/net/http2/writesched_priority.go | 9
-rw-r--r--  vendor/golang.org/x/net/idna/trieval.go | 34
-rw-r--r--  vendor/golang.org/x/sync/errgroup/errgroup.go | 70
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 29
-rw-r--r--  vendor/golang.org/x/sys/unix/asm_linux_loong64.s | 4
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_aix.go | 4
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_bsd.go | 46
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_darwin.go | 7
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_illumos.go | 5
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_linux.go | 45
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_linux_loong64.go | 39
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go | 1
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_solaris.go | 51
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_unix.go | 74
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux.go | 39
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_386.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_arm.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go | 3
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go | 4
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_mips.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go | 25
-rw-r--r--  vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go | 11
-rw-r--r--  vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go | 14
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go | 1
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go | 73
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go | 73
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux.go | 22
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_386.go | 7
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_arm.go | 7
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_mips.go | 7
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go | 7
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go | 7
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go | 2
-rw-r--r--  vendor/google.golang.org/grpc/CONTRIBUTING.md | 7
-rw-r--r--  vendor/google.golang.org/grpc/balancer/balancer.go | 3
-rw-r--r--  vendor/google.golang.org/grpc/balancer_conn_wrappers.go | 318
-rw-r--r--  vendor/google.golang.org/grpc/channelz/channelz.go | 36
-rw-r--r--  vendor/google.golang.org/grpc/clientconn.go | 350
-rw-r--r--  vendor/google.golang.org/grpc/credentials/insecure/insecure.go | 26
-rw-r--r--  vendor/google.golang.org/grpc/dialoptions.go | 52
-rw-r--r--  vendor/google.golang.org/grpc/encoding/encoding.go | 2
-rw-r--r--  vendor/google.golang.org/grpc/go.mod | 12
-rw-r--r--  vendor/google.golang.org/grpc/go.sum | 30
-rw-r--r--  vendor/google.golang.org/grpc/interceptor.go | 9
-rw-r--r--  vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go | 382
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/binarylog.go | 91
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/env_config.go | 6
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/method_logger.go | 26
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/funcs.go | 228
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/id.go | 75
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/logging.go | 91
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/types.go | 23
-rw-r--r--  vendor/google.golang.org/grpc/internal/envconfig/xds.go | 12
-rw-r--r--  vendor/google.golang.org/grpc/internal/internal.go | 13
-rw-r--r--  vendor/google.golang.org/grpc/internal/metadata/metadata.go | 46
-rw-r--r--  vendor/google.golang.org/grpc/internal/pretty/pretty.go | 82
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/controlbuf.go | 6
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http2_client.go | 31
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http2_server.go | 93
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/transport.go | 11
-rw-r--r--  vendor/google.golang.org/grpc/metadata/metadata.go | 8
-rw-r--r--  vendor/google.golang.org/grpc/picker_wrapper.go | 8
-rw-r--r--  vendor/google.golang.org/grpc/pickfirst.go | 126
-rw-r--r--  vendor/google.golang.org/grpc/regenerate.sh | 12
-rw-r--r--  vendor/google.golang.org/grpc/resolver/resolver.go | 8
-rw-r--r--  vendor/google.golang.org/grpc/resolver_conn_wrapper.go | 23
-rw-r--r--  vendor/google.golang.org/grpc/server.go | 92
-rw-r--r--  vendor/google.golang.org/grpc/service_config.go | 5
-rw-r--r--  vendor/google.golang.org/grpc/stream.go | 235
-rw-r--r--  vendor/google.golang.org/grpc/version.go | 2
-rw-r--r--  vendor/google.golang.org/grpc/vet.sh | 2
-rw-r--r--  vendor/google.golang.org/protobuf/encoding/protojson/decode.go | 665
-rw-r--r--  vendor/google.golang.org/protobuf/encoding/protojson/doc.go | 11
-rw-r--r--  vendor/google.golang.org/protobuf/encoding/protojson/encode.go | 344
-rw-r--r--  vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go | 889
-rw-r--r--  vendor/google.golang.org/protobuf/internal/encoding/json/decode.go | 340
-rw-r--r--  vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go | 254
-rw-r--r--  vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go | 91
-rw-r--r--  vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go | 192
-rw-r--r--  vendor/google.golang.org/protobuf/internal/encoding/json/encode.go | 276
-rw-r--r--  vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go | 168
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/.travis.yml | 4
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go | 2
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/crypter.go | 1
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/decode.go | 52
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/stream.go | 7
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/jwk.go | 6
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/opaque.go | 2
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/shared.go | 6
-rw-r--r--  vendor/modules.txt | 80
585 files changed, 40460 insertions, 10271 deletions
diff --git a/Makefile b/Makefile
index 32de58856..4dc173400 100644
--- a/Makefile
+++ b/Makefile
@@ -258,7 +258,7 @@ test/version/version: version/version.go
.PHONY: codespell
codespell:
- codespell -S bin,vendor,.git,go.sum,.cirrus.yml,"RELEASE_NOTES.md,*.xz,*.gz,*.ps1,*.tar,swagger.yaml,*.tgz,bin2img,*ico,*.png,*.1,*.5,copyimg,*.orig,apidoc.go" -L uint,iff,od,seeked,splitted,marge,ERRO,hist,ether -w
+ codespell -S bin,vendor,.git,go.sum,.cirrus.yml,"RELEASE_NOTES.md,*.xz,*.gz,*.ps1,*.tar,swagger.yaml,*.tgz,bin2img,*ico,*.png,*.1,*.5,copyimg,*.orig,apidoc.go" -L pullrequest,uint,iff,od,seeked,splitted,marge,erro,hist,ether -w
.PHONY: validate
validate: lint .gitvalidation validate.completions man-page-check swagger-check tests-included tests-expect-exit
diff --git a/contrib/cirrus/CIModes.md b/contrib/cirrus/CIModes.md
index 8b1e33bb1..c782ca64b 100644
--- a/contrib/cirrus/CIModes.md
+++ b/contrib/cirrus/CIModes.md
@@ -27,7 +27,7 @@ outdated) example of it's output can be seen below:
[The list is documented](https://cirrus-ci.org/guide/writing-tasks/#environment-variables). Reference to any variables defined in YAML will **not** behave how
you expect, don't use them!
-* Somme Cirrus-CI defined variables contain non-empty values outside their
+* Some Cirrus-CI defined variables contain non-empty values outside their
obvious context. For example, when running for a PR a task will have
`$CIRRUS_BRANCH` set to `pull/<number>`.
diff --git a/go.mod b/go.mod
index e9f2ce343..023b9c025 100644
--- a/go.mod
+++ b/go.mod
@@ -11,13 +11,13 @@ require (
github.com/container-orchestrated-devices/container-device-interface v0.4.0
github.com/containernetworking/cni v1.1.1
github.com/containernetworking/plugins v1.1.1
- github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c
- github.com/containers/common v0.48.1-0.20220705175712-dd1c331887b9
+ github.com/containers/buildah v1.26.1-0.20220716095526-d31d27c357ab
+ github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a
github.com/containers/conmon v2.0.20+incompatible
- github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c
+ github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c
github.com/containers/ocicrypt v1.1.5
github.com/containers/psgo v1.7.2
- github.com/containers/storage v1.41.1-0.20220616120034-7df64288ef35
+ github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a
github.com/coreos/go-systemd/v22 v22.3.2
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cyphar/filepath-securejoin v0.2.3
@@ -64,8 +64,8 @@ require (
github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4
go.etcd.io/bbolt v1.3.6
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
- golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
+ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
+ golang.org/x/sys v0.0.0-20220624220833-87e55d714810
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
golang.org/x/text v0.3.7
google.golang.org/protobuf v1.28.0
diff --git a/go.sum b/go.sum
index f31cebeb8..8e0a9ad83 100644
--- a/go.sum
+++ b/go.sum
@@ -28,16 +28,31 @@ cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSU
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -49,6 +64,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
@@ -57,6 +73,7 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h
github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo=
github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v66.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
@@ -65,12 +82,20 @@ github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
+github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
@@ -80,10 +105,14 @@ github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
@@ -118,12 +147,12 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
-github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/ProtonMail/go-crypto v0.0.0-20220517143526-88bb52951d5b/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/ReneKroon/ttlcache/v2 v2.11.0/go.mod h1:mBxvsNY+BT8qLLd6CuAJubbKo6r0jh3nb5et22bbfGY=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
@@ -146,6 +175,8 @@ github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
@@ -155,6 +186,20 @@ github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZo
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.44.44/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go-v2 v1.16.5/go.mod h1:Wh7MEsmEApyL5hrWzpDkba4gwAPc5/piwLVLFnCxp48=
+github.com/aws/aws-sdk-go-v2/config v1.15.11/go.mod h1:mD5tNFciV7YHNjPpFYqJ6KGpoSfY107oZULvTHIxtbI=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.6/go.mod h1:mQgnRmBPF2S/M01W4T4Obp3ZaZB6o1s/R8cOUda9vtI=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.6/go.mod h1:ClLMcuQA/wcHPmOIfNzNI4Y1Q0oDbmEkbYhMFOzHDh8=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12/go.mod h1:Afj/U8svX6sJ77Q+FPWMzabJ9QjbwP32YlopgKALUpg=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6/go.mod h1:FwpAKI+FBPIELJIdmQzlLtRe8LQSOreMcM2wBsPMvvc=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.13/go.mod h1:hiM/y1XPp3DoEPhoVEYc/CZcS58dP6RKJRDFp99wdX0=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6/go.mod h1:DxAPjquoEHf3rUHh1b9+47RAaXB8/7cB6jkzCt/GOEI=
+github.com/aws/aws-sdk-go-v2/service/kms v1.17.3/go.mod h1:EKkrWWXwWYf8x3Nrm6Oix3zZP9NRBHqxw5buFGVBHA0=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.9/go.mod h1:UqRD9bBt15P0ofRyDZX6CfsIqPpzeHOhZKWzgSuAzpo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.7/go.mod h1:lVxTdiiSHY3jb1aeg+BBFtDzZGSUCv6qaNOyEGCJ1AY=
+github.com/aws/smithy-go v1.11.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/beeker1121/goque v1.0.3-0.20191103205551-d618510128af/go.mod h1:84CWnaDz4g1tEVnFLnuBigmGK15oPohy0RfvSN8d4eg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -186,6 +231,8 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
+github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -215,6 +262,8 @@ github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/cilium/ebpf v0.9.0/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -223,11 +272,14 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/container-orchestrated-devices/container-device-interface v0.4.0 h1:b/mROkfDr1W8fJ25T66iVheHFnWixgyxTOSbO8i7jp4=
github.com/container-orchestrated-devices/container-device-interface v0.4.0/go.mod h1:E1zcucIkq9P3eyNmY+68dBQsTcsXJh9cgRo2IVNScKQ=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
@@ -309,8 +361,9 @@ github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
-github.com/containerd/stargz-snapshotter/estargz v0.11.4 h1:LjrYUZpyOhiSaU7hHrdR82/RBoxfGWSaC0VeSSMXqnk=
github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
+github.com/containerd/stargz-snapshotter/estargz v0.12.0 h1:idtwRTLjk2erqiYhPWy2L844By8NRFYEwYHcXhoIWPM=
+github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -337,18 +390,15 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD
github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
-github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c h1:/fKyiLFFuceBPZGJ0Lig7ElURhfsslAOw1BOcItD+X8=
-github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c/go.mod h1:b0L+u2Dam7soWGn5sVTK31L++Xrf80AbGvK5z9D2+lw=
-github.com/containers/common v0.48.1-0.20220608111710-dbecabbe82c9/go.mod h1:WBLwq+i7bicCpH54V70HM6s7jqDAESTlYnd05XXp0ac=
-github.com/containers/common v0.48.1-0.20220705175712-dd1c331887b9 h1:KeGIf6Z1R+16Sq+5/fhkoCCKa7wjQ6Ksnmo0beU1E2U=
-github.com/containers/common v0.48.1-0.20220705175712-dd1c331887b9/go.mod h1:Zt3D/IhgFyG1oaBrqsbn9NdH/4fkjsO2Y0ahP12ieu4=
+github.com/containers/buildah v1.26.1-0.20220716095526-d31d27c357ab h1:NeI0DOkTf3Tn4OpdjMhMubAfTPs2oCO5jUY5wnpv4qk=
+github.com/containers/buildah v1.26.1-0.20220716095526-d31d27c357ab/go.mod h1:iVtQtU6a+pbETBqIzg0oAWW3gTR1ItrAihJpLFFppmA=
+github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a h1:kdcruVl641VTIm8C3O58WRYcBTbnWCsh6AJymk28ScM=
+github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a/go.mod h1:1dA7JPGoSi83kjf5H4NIrGANyLOULyvFqV1bwvYFEek=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.21.2-0.20220511203756-fe4fd4ed8be4/go.mod h1:OsX9sFexyGF0FCNAjfcVFv3IwMqDyLyV/WQY/roLPcE=
-github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471/go.mod h1:KntCBNQn3qOuZmQuJ38ORyTozmWXiuo05Vef2S0Sm5M=
-github.com/containers/image/v5 v5.21.2-0.20220615100411-a78a00792916/go.mod h1:hEf8L08Hrh/3fK4vLf5l7988MJmij2swfCBUzqgnhF4=
-github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c h1:U2F/FaMt8gPP8sIpBfvXMCP5gAZfyxoYZ7lmu0dwsXc=
-github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c/go.mod h1:hEf8L08Hrh/3fK4vLf5l7988MJmij2swfCBUzqgnhF4=
+github.com/containers/image/v5 v5.21.2-0.20220712113758-29aec5f7bbbf/go.mod h1:0+N0ZM9mgMmoZZc6uNcgnEsbX85Ne7b29cIW5lqWwVU=
+github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c h1:ms1Vyzs9Eb17J38aFKrL0+ig2pVwQq3OleaO7VmQuV0=
+github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c/go.mod h1:ykVAVRj4DhQNMHZDVU+KCtXjWBKpqiUe669eF0WBEEc=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -356,19 +406,16 @@ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgU
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
-github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
github.com/containers/ocicrypt v1.1.5 h1:UO+gBnBXvMvC7HTXLh0bPgLslfW8HlY+oxYcoSHBcZQ=
github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
github.com/containers/psgo v1.7.2 h1:WbCvsY9w+nCv3j4der0mbD3PSRUv/W8l+G0YrZrdSDc=
github.com/containers/psgo v1.7.2/go.mod h1:SLpqxsPOHtTqRygjutCPXmeU2PoEFzV3gzJplN4BMx0=
github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc=
-github.com/containers/storage v1.40.2/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
github.com/containers/storage v1.41.0/go.mod h1:Pb0l5Sm/89kolX3o2KolKQ5cCHk5vPNpJrhNaLcdS5s=
-github.com/containers/storage v1.41.1-0.20220607143333-8951d0153bf6/go.mod h1:6XQ68cEG8ojfP/m3HIupFV1rZsnqeFmaE8N1ctBP94Y=
-github.com/containers/storage v1.41.1-0.20220614214904-388efef4bf7e/go.mod h1:6XQ68cEG8ojfP/m3HIupFV1rZsnqeFmaE8N1ctBP94Y=
-github.com/containers/storage v1.41.1-0.20220616120034-7df64288ef35 h1:6o+kw2z0BrdJ1wNYUbwLVzlb/65KPmZSy+32GNfppzM=
-github.com/containers/storage v1.41.1-0.20220616120034-7df64288ef35/go.mod h1:6XQ68cEG8ojfP/m3HIupFV1rZsnqeFmaE8N1ctBP94Y=
+github.com/containers/storage v1.41.1-0.20220712184034-d26be7b27860/go.mod h1:uu6HCcijN30xRxW1ZuZRngwFGOlH5NpBWYiNBnDQNRw=
+github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a h1:+arJAP0v8kEy5fKRPIELjarjpwUHhB7SyRE0uFXlyKY=
+github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a/go.mod h1:4DfR+cPpkXKhJnnyydD3z82DXrnTBT63y1k0QWtM2i4=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -377,13 +424,15 @@ github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmeka
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-oidc/v3 v3.2.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a h1:W8b4lQ4tFF21aspRGoBuCNV6V2fFJBF+pm1J6OY8Lys=
github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
@@ -417,15 +466,18 @@ github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCF
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/go-libvirt v0.0.0-20201209184759-e2a69bcd5bd1 h1:j6vGflaQ2T7yOWqVgPdiRF73j/U2Zmpbbzab8nyDCRQ=
github.com/digitalocean/go-libvirt v0.0.0-20201209184759-e2a69bcd5bd1/go.mod h1:QS1XzqZLcDniNYrN7EZefq3wIyb/M2WmJbql4ZKoc1Q=
github.com/digitalocean/go-qemu v0.0.0-20210326154740-ac9e0b687001 h1:WAg57gnaAWWjMAELcwHjc2xy0PoXQ5G+vn3+XS6s1jI=
github.com/digitalocean/go-qemu v0.0.0-20210326154740-ac9e0b687001/go.mod h1:IetBE52JfFxK46p2n2Rqm+p5Gx1gpu2hRHsrbnPOWZQ=
+github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.16+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -433,7 +485,6 @@ github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.15+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.16+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@@ -463,6 +514,7 @@ github.com/dtylman/scp v0.0.0-20181017070807-f3000a34aef4 h1:Tc//0LMiRsUsOIu4S+H
github.com/dtylman/scp v0.0.0-20181017070807-f3000a34aef4/go.mod h1:jN1ZaUPSNA8jm10nmaRLky84qV/iCeiHmcEf3EbP+dc=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/eggsampler/acme/v3 v3.2.1/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@@ -474,24 +526,39 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/esimonov/ifshort v1.0.3/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE=
github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
+github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
+github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
+github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01/go.mod h1:ypD5nozFk9vcGw1ATYefw6jHe/jZP++Z15/+VTMcWhc=
+github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
+github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52/go.mod h1:yIquW87NGRw1FU5p5lEkpnt/QxoH5uPAOUlOVkAUuMg=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
github.com/fanliao/go-promise v0.0.0-20141029170127-1890db352a72/go.mod h1:PjfxuH4FZdUyfMdtBio2lsRr1AKEaVPwelzuHuh8Lqc=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -509,14 +576,19 @@ github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49P
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.7.1/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY=
+github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-critic/go-critic v0.6.1/go.mod h1:SdNCfU0yF3UBjtaZGw6586/WocupMOJuiqgom5DsQxM=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
@@ -544,12 +616,23 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
+github.com/go-rod/rod v0.107.3/go.mod h1:4SqYRUrcc4dSr9iT36YRZ4hdUAPg3A0O8RhxAMh0eCQ=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
+github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
@@ -562,6 +645,28 @@ github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslW
github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
+github.com/gobuffalo/attrs v0.1.0/go.mod h1:fmNpaWyHM0tRm8gCZWKx8yY9fvaNLo2PyzBNSrBZ5Hw=
+github.com/gobuffalo/envy v1.8.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w=
+github.com/gobuffalo/envy v1.9.0/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w=
+github.com/gobuffalo/fizz v1.10.0/go.mod h1:J2XGPO0AfJ1zKw7+2BA+6FEGAkyEsdCOLvN93WCT2WI=
+github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
+github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
+github.com/gobuffalo/flect v0.2.1/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
+github.com/gobuffalo/genny/v2 v2.0.5/go.mod h1:kRkJuAw9mdI37AiEYjV4Dl+TgkBDYf8HZVjLkqe5eBg=
+github.com/gobuffalo/github_flavored_markdown v1.1.0/go.mod h1:TSpTKWcRTI0+v7W3x8dkSKMLJSUpuVitlptCkpeY8ic=
+github.com/gobuffalo/helpers v0.6.0/go.mod h1:pncVrer7x/KRvnL5aJABLAuT/RhKRR9klL6dkUOhyv8=
+github.com/gobuffalo/helpers v0.6.1/go.mod h1:wInbDi0vTJKZBviURTLRMFLE4+nF2uRuuL2fnlYo7w4=
+github.com/gobuffalo/logger v1.0.3/go.mod h1:SoeejUwldiS7ZsyCBphOGURmWdwUFXs0J7TCjEhjKxM=
+github.com/gobuffalo/nulls v0.2.0/go.mod h1:w4q8RoSCEt87Q0K0sRIZWYeIxkxog5mh3eN3C/n+dUc=
+github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q=
+github.com/gobuffalo/packd v1.0.0/go.mod h1:6VTc4htmJRFB7u1m/4LeMTWjFoYrUiBkU9Fdec9hrhI=
+github.com/gobuffalo/packr/v2 v2.8.0/go.mod h1:PDk2k3vGevNE3SwVyVRgQCCXETC9SaONCNSXT1Q8M1g=
+github.com/gobuffalo/plush/v4 v4.0.0/go.mod h1:ErFS3UxKqEb8fpFJT7lYErfN/Nw6vHGiDMTjxpk5bQ0=
+github.com/gobuffalo/pop/v5 v5.3.1/go.mod h1:vcEDhh6cJ3WVENqJDFt/6z7zNb7lLnlN8vj3n5G9rYA=
+github.com/gobuffalo/tags/v3 v3.0.2/go.mod h1:ZQeN6TCTiwAFnS0dNcbDtSgZDwNKSpqajvVtt6mlYpA=
+github.com/gobuffalo/tags/v3 v3.1.0/go.mod h1:ZQeN6TCTiwAFnS0dNcbDtSgZDwNKSpqajvVtt6mlYpA=
+github.com/gobuffalo/validate/v3 v3.0.0/go.mod h1:HFpjq+AIiA2RHoQnQVTFKF/ZpUPXwyw82LgyDPxQ9r0=
+github.com/gobuffalo/validate/v3 v3.1.0/go.mod h1:HFpjq+AIiA2RHoQnQVTFKF/ZpUPXwyw82LgyDPxQ9r0=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
@@ -573,6 +678,7 @@ github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -582,6 +688,8 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -618,7 +726,9 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
@@ -633,6 +743,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
+github.com/google/certificate-transparency-go v1.0.22-0.20181127102053-c25855a82c75/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -646,9 +757,12 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
+github.com/google/go-containerregistry v0.10.0 h1:qd/fv2nQajGZJenaNcdaghlwSPjQ0NphN9hzArr2WWg=
+github.com/google/go-containerregistry v0.10.0/go.mod h1:C7uwbB1QUAtvnknyd3ethxJRd4gtEjU/9WLXzckfI1Y=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -686,12 +800,19 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
@@ -702,7 +823,6 @@ github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc=
@@ -742,22 +862,47 @@ github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FK
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
+github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -770,6 +915,14 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/hashicorp/vault/api v1.7.2/go.mod h1:xbfA+1AvxFseDzxxdWaL0uO99n1+tndus4GCrtouy0M=
+github.com/hashicorp/vault/sdk v0.5.1/go.mod h1:DoGraE9kKGNcVgPmTuX357Fm6WAx1Okvde8Vp3dPDoU=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
+github.com/honeycombio/beeline-go v1.1.1 h1:sU8r4ae34uEL3/CguSl8Mr+Asz9DL1nfH9Wwk85Pc7U=
+github.com/honeycombio/beeline-go v1.1.1/go.mod h1:kN0cfUGBMfA87DyCYbiiLoSzWsnw3bluZvNEWtatHxk=
+github.com/honeycombio/libhoney-go v1.15.2 h1:5NGcjOxZZma13dmzNcl3OtGbF1hECA0XHJNHEb2t2ck=
+github.com/honeycombio/libhoney-go v1.15.2/go.mod h1:JzhRPYgoBCd0rZvudrqmej4Ntx0w7AT3wAJpf5+t1WA=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
@@ -790,7 +943,40 @@ github.com/insomniacslk/dhcp v0.0.0-20220119180841-3c283ff8b7dd/go.mod h1:h+MxyH
github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.6.0/go.mod h1:yeseQo4xhQbgyJs2c87RAXOH2i624N0Fh1KSPJya7qo=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik=
+github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
+github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
@@ -801,8 +987,12 @@ github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=
+github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
@@ -815,6 +1005,7 @@ github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqo
github.com/jsimonetti/rtnetlink v0.0.0-20201110080708-d2c240429e6c/go.mod h1:huN4d1phzjhlOsNIjFsw2SVRbwIHj3fJDMEU2SDPTmg=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -827,22 +1018,26 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
+github.com/karrick/godirwalk v1.15.3/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY=
-github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.8 h1:JahtItbkWjf2jzm/T+qgMxkP9EMHsqEUA6vCMGmXvhA=
+github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -857,6 +1052,7 @@ github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
@@ -864,16 +1060,26 @@ github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5
github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
+github.com/labstack/echo/v4 v4.3.0/go.mod h1:PvmtTvhVqKDzDQy4d3bWzPjZLzom4iQbAZy2sgZ/qI8=
+github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e h1:1aV3EJ4ZMsc63MFU4rB+ccSEhZvvVD71T9RA4Rqd3hI=
+github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e/go.mod h1:Bl3mfF2LHYepsU2XfzMceIglyByfPe1IFAXtO+p37Qk=
+github.com/letsencrypt/challtestsrv v1.2.1/go.mod h1:Ur4e4FvELUXLGhkMztHOsPIsvGxD/kzSJninOrkM+zc=
github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/luna-duclos/instrumentedsql v1.1.3/go.mod h1:9J1njvFds+zN7y85EDhN9XNQLANWwZt2ULeIC8yMNYs=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
@@ -885,18 +1091,27 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ
github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
+github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc=
+github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
@@ -913,6 +1128,9 @@ github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
@@ -928,9 +1146,12 @@ github.com/mdlayher/raw v0.0.0-20190606142536-fef19f00fc18/go.mod h1:7EpbotpCmVZ
github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg=
github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
github.com/mgechev/revive v1.1.2/go.mod h1:bnXsMr+ZTH09V5rssEI+jHAZ4z+ZdyhgO/zsy3EhK+0=
+github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/dns v1.1.45/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
@@ -940,19 +1161,25 @@ github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
@@ -986,6 +1213,7 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8=
github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s=
+github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -1005,6 +1233,8 @@ github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
@@ -1013,6 +1243,7 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -1024,11 +1255,13 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY=
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
@@ -1038,8 +1271,10 @@ github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDs
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -1051,7 +1286,6 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84/go.mod h1:Qnt1q4cjDNQI9bT832ziho5Iw2BhK8o1KwLOwW56VP4=
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 h1:+czc/J8SlhPKLOtVLMQc+xDCFBT73ZStMsRhSsUhsSg=
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q=
github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc h1:qjkUzmFsOFbQyjObybk40mRida83j5IHRaKzLGdBbEU=
@@ -1078,8 +1312,8 @@ github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w=
github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
-github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805 h1:bfLqBGqF04GAoqbMkSrd5VPk/t66OnP0bTTisMaF4ro=
-github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ=
+github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df h1:vf6pdI10F2Tim5a9JKiVVl4/dpNz1OEhz4EnfLdLtiA=
+github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
@@ -1089,6 +1323,7 @@ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
@@ -1096,6 +1331,9 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
+github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -1108,19 +1346,21 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
-github.com/proglottis/gpgme v0.1.2 h1:dKlhDqJ0kdEt+YHCD8FQEUdF9cJj/+mbJUNyUGNAEzY=
-github.com/proglottis/gpgme v0.1.2/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
+github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
+github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -1132,10 +1372,12 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug=
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -1165,12 +1407,17 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0=
github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rootless-containers/rootlesskit v1.0.1 h1:jepqW1txFSowKSMAEkVhWH3Oa1TCY9S400MVYe/6Iro=
github.com/rootless-containers/rootlesskit v1.0.1/go.mod h1:t2UAiYagxrJ+wmpFAUIZPcqsm4k2B7ve6g7lILKbloc=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
+github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -1178,6 +1425,8 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg=
github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
@@ -1188,23 +1437,28 @@ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U=
+github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
github.com/securego/gosec/v2 v2.9.1/go.mod h1:oDcDLcatOJxkCGaCaq8lua1jTnYf6Sou4wdiJ1n4iHc=
+github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 h1:5TPCWtlOsaCiuAaglfZX7obd+/kuE8lGUhsVQzmQSaI=
+github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1/go.mod h1:y83NePRM98MJpbGgBgi54UZduhG0aD7lYngAVCx+i/E=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -1212,6 +1466,7 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
@@ -1219,7 +1474,9 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
@@ -1230,6 +1487,7 @@ github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
@@ -1269,11 +1527,10 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
github.com/sylabs/sif/v2 v2.7.1 h1:XXt9AP39sQfsMCGOGQ/XP9H47yqZOvAonalkaCaNIYM=
github.com/sylabs/sif/v2 v2.7.1/go.mod h1:bBse2nEFd3yHkmq6KmAOFEWQg5LdFYiQUdVcgamxlc8=
github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
@@ -1281,6 +1538,8 @@ github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
@@ -1288,7 +1547,11 @@ github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0
github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
+github.com/theupdateframework/go-tuf v0.3.0 h1:od2sc5+BSkKZhmUG2o2rmruy0BGSmhrbDhCnpxh87X8=
+github.com/theupdateframework/go-tuf v0.3.0/go.mod h1:E5XP0wXitrFUHe4b8cUcAAdxBW4LbfnqF4WXXGLgWNo=
github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -1298,12 +1561,15 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1
github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY=
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/u-root/uio v0.0.0-20210528114334-82958018845c/go.mod h1:LpEX5FO/cB+WF4TYGY1V5qktpaZLkKkSegbr0V4eYXA=
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
@@ -1319,11 +1585,12 @@ github.com/urfave/cli/v2 v2.5.1/go.mod h1:oDzoM7pVwz6wHn5ogWgFUU1s4VJayeQS+aEZDq
github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
+github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
+github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
-github.com/vbauerster/mpb/v7 v7.4.1/go.mod h1:Ygg2mV9Vj9sQBWqsK2m2pidcf9H3s6bNKtqd3/M4gBo=
github.com/vbauerster/mpb/v7 v7.4.2 h1:n917F4d8EWdUKc9c81wFkksyG6P6Mg7IETfKCE1Xqng=
github.com/vbauerster/mpb/v7 v7.4.2/go.mod h1:UmOiIUI8aPqWXIps0ciik3RKMdzx7+ooQpq+fBcXwBA=
github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
@@ -1338,6 +1605,12 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U=
+github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
+github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY=
+github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
+github.com/weppos/publicsuffix-go v0.15.1-0.20210807195340-dc689ff0bb59/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
+github.com/weppos/publicsuffix-go v0.15.1-0.20220329081811-9a40b608a236/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -1352,6 +1625,11 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc=
+github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
+github.com/ysmood/got v0.31.2/go.mod h1:pE1l4LOwOBhQg6A/8IAatkGp7uZjnalzrZolnlhhMgY=
+github.com/ysmood/gotrace v0.6.0/go.mod h1:TzhIG7nHDry5//eYZDYcTzuJLYQIkykJzCRIo4/dzQM=
+github.com/ysmood/gson v0.7.1/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
+github.com/ysmood/leakless v0.7.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
@@ -1365,6 +1643,12 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
+github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
+github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is=
+github.com/zmap/zcrypto v0.0.0-20210811211718-6f9bc4aff20f/go.mod h1:y/9hjFEub4DtQxTHp/pqticBgdYeCwL97vojV3lsvHY=
+github.com/zmap/zlint/v3 v3.3.1-0.20211019173530-cb17369b4628/go.mod h1:O+4OXRfNLKqOyDl4eKZ1SBlYudKGUBGRFcv+m1KLr28=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
@@ -1391,24 +1675,33 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
+go.opentelemetry.io/contrib/propagators v0.19.0 h1:HrixVNZYFjUl/Db+Tr3DhqzLsVW9GeVf/Gye+C5dNUY=
+go.opentelemetry.io/contrib/propagators v0.19.0/go.mod h1:4QOdZClXISU5S43xZxk5tYaWcpb+lehqfKtE6PK6msE=
+go.opentelemetry.io/otel v0.19.0/go.mod h1:j9bF567N9EfomkSidSfmMwIwIBuP37AMAIzVW85OxSg=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y=
go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
+go.opentelemetry.io/otel/metric v0.19.0/go.mod h1:8f9fglJPRnXuskQmKpnad31lcLJ2VmNNqIsx/uIwBSc=
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/oteltest v0.19.0/go.mod h1:tI4yxwh8U21v7JD6R3BcA/2+RBoTKFexE/PJ/nSO7IA=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/trace v0.19.0/go.mod h1:4IXiNextNOpPnRlI4ryK69mn5iC84bjBWZQA5DXz/qg=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY=
go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
@@ -1416,6 +1709,7 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@@ -1423,9 +1717,11 @@ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+goji.io/v3 v3.0.0/go.mod h1:c02FFnNiVNCDo+DpR2IhBQpM9r5G1BG/MkHNTPUJ13U=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -1433,6 +1729,7 @@ golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -1440,16 +1737,22 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1490,6 +1793,7 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1527,12 +1831,14 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -1551,15 +1857,24 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220524220425-1d687d428aca/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1576,6 +1891,13 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1587,8 +1909,10 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1601,6 +1925,7 @@ golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190418153312-f0ce4c0180be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1619,6 +1944,7 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1658,6 +1984,7 @@ golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1678,6 +2005,8 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1694,6 +2023,7 @@ golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1704,6 +2034,7 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1711,14 +2042,22 @@ golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810 h1:rHZQSjJdAI4Xf5Qzeh2bBc5YJIkPFVM6oDtMFYmgws0=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1742,9 +2081,11 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M=
+golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1762,6 +2103,7 @@ golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
@@ -1771,6 +2113,7 @@ golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1799,6 +2142,7 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
@@ -1844,6 +2188,7 @@ golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
@@ -1852,14 +2197,20 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -1889,7 +2240,20 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1897,6 +2261,7 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@@ -1949,6 +2314,7 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
@@ -1963,9 +2329,32 @@ google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwy
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8 h1:U9V52f6rAgINH7kT+musA1qF8kWyVOxzF8eYuOVuFwQ=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f h1:hJ/Y5SqPXbarffmAsApliUlcvMU+wScNGfyop4bZm8o=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1997,10 +2386,16 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -2019,6 +2414,8 @@ google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscL
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
+gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -2032,6 +2429,7 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -2041,8 +2439,10 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
+gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
+gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
diff --git a/test/apiv2/12-imagesMore.at b/test/apiv2/12-imagesMore.at
index fc18dd2d7..d4b09174f 100644
--- a/test/apiv2/12-imagesMore.at
+++ b/test/apiv2/12-imagesMore.at
@@ -63,7 +63,7 @@ podman pull -q $IMAGE
podman system connection add --default test ssh://$USER@localhost/run/user/$UID/podman/podman.sock
# should fail but need to check the output...
# status 125 here means that the save/load fails due to
-# cirrus weirdness with exec.Command. All of the args have been parsed sucessfully.
+# cirrus weirdness with exec.Command. All of the args have been parsed successfully.
t POST "libpod/images/scp/$IMAGE?destination=QA::" 500 \
.cause="exit status 125"
t DELETE libpod/images/$IMAGE 200 \
diff --git a/test/buildah-bud/apply-podman-deltas b/test/buildah-bud/apply-podman-deltas
index 0b691dd0e..d6d539fa4 100755
--- a/test/buildah-bud/apply-podman-deltas
+++ b/test/buildah-bud/apply-podman-deltas
@@ -129,10 +129,10 @@ errmsg "no such file or directory" \
errmsg "no such file or directory" \
"Error: no context directory and no Containerfile specified" \
- "bud without any arguments should fail when no Dockerfile exist"
+ "bud without any arguments should fail when no Dockerfile exists"
errmsg "is not a file" \
- "Error: open .*: no such file or directory" \
+ "Error: containerfile: .* cannot be path to a directory" \
"bud with specified context should fail if assumed Dockerfile is a directory"
errmsg "no such file or directory" \
@@ -197,7 +197,7 @@ skip_if_remote "--build-context option not implemented in podman-remote" \
"build-with-additional-build-context and COPY, additional context from host" \
"build-with-additional-build-context and RUN --mount=from=, additional-context not image and also test conflict with stagename" \
-skip_if_remote "env-variable for Containerfile.in pre-processing is not propogated on remote" \
+skip_if_remote "env-variable for Containerfile.in pre-processing is not propagated on remote" \
"bud with Containerfile.in, via envariable" \
# Requires a local file outside context dir
@@ -215,7 +215,8 @@ skip_if_remote "--output option not implemented in podman-remote" \
"build with custom build output and output rootfs to directory" \
"build with custom build output and output rootfs to tar" \
"build with custom build output and output rootfs to tar by pipe" \
- "build with custom build output must fail for bad input"
+ "build with custom build output must fail for bad input" \
+ "build with custom build output and output rootfs to tar with no additional step"
# https://github.com/containers/podman/issues/14544
skip_if_remote "logfile not implemented on remote" "bud-logfile-with-split-logfile-by-platform"
@@ -223,6 +224,10 @@ skip_if_remote "logfile not implemented on remote" "bud-logfile-with-split-logfi
skip_if_remote "envariables do not automatically work with -remote." \
"build proxy"
+# 2022-07-04 this is a new test in buildah; it's failing in treadmill
+skip_if_remote "FIXME FIXME FIXME: does this test make sense in remote?" \
+ "build-test with OCI prestart hook"
+
###############################################################################
# BEGIN tests which are skipped due to actual podman or podman-remote bugs.
diff --git a/test/buildah-bud/run-buildah-bud-tests b/test/buildah-bud/run-buildah-bud-tests
index 4ff062496..d0e2e3237 100755
--- a/test/buildah-bud/run-buildah-bud-tests
+++ b/test/buildah-bud/run-buildah-bud-tests
@@ -94,10 +94,13 @@ fi
set -e
# Run sudo early, to refresh the credentials cache. This is a NOP under CI,
-# but might be appreciated by developers who run this script, step away
+# but might be appreciated by developers who run this script, step away
# during the git-checkout-buildah step, then come back twenty minutes later
-# to an expired sudo prompt and no tests have run.
-sudo --validate
+# to an expired sudo prompt and no tests have run. (No need to do this
+# for checkout; only when running tests)
+if [[ -n $do_test ]]; then
+ sudo --validate
+fi
# Before pulling buildah (while still cd'ed to podman repo), try to determine
# if this is a PR, and if so if it's a revendoring of buildah. We use this to
diff --git a/test/system/010-images.bats b/test/system/010-images.bats
index 69ed1004c..638910302 100644
--- a/test/system/010-images.bats
+++ b/test/system/010-images.bats
@@ -259,8 +259,8 @@ Labels.created_at | 20[0-9-]\\\+T[0-9:]\\\+Z
run_podman 2 rmi -a
is "$output" "Error: 2 errors occurred:
-.** Image used by .*: image is in use by a container
-.** Image used by .*: image is in use by a container"
+.** image used by .*: image is in use by a container
+.** image used by .*: image is in use by a container"
run_podman rmi -af
is "$output" "Untagged: $IMAGE
@@ -292,7 +292,7 @@ Deleted: $pauseID" "infra images gets removed as well"
pauseID=$output
run_podman 2 rmi $pauseImage
- is "$output" "Error: Image used by .* image is in use by a container"
+ is "$output" "Error: image used by .* image is in use by a container"
run_podman rmi -f $pauseImage
is "$output" "Untagged: $pauseImage
diff --git a/test/system/120-load.bats b/test/system/120-load.bats
index 7f0bcfd95..8dcdd8bdd 100644
--- a/test/system/120-load.bats
+++ b/test/system/120-load.bats
@@ -158,6 +158,7 @@ verify_iid_and_name() {
run_podman 125 image scp $nope ${notme}@localhost::
is "$output" "Error: $nope: image not known.*" "Pushing nonexistent image"
+ run_podman rmi foobar:123
}
diff --git a/test/system/170-run-userns.bats b/test/system/170-run-userns.bats
index 84788a7f4..2ad9eb0b8 100644
--- a/test/system/170-run-userns.bats
+++ b/test/system/170-run-userns.bats
@@ -124,7 +124,7 @@ EOF
run_podman rm -t 0 --force ${cid}
else
run_podman 125 run -d --userns=nomap $IMAGE sleep 100
- is "${output}" "Error: nomap is only supported in rootless mode" "Container should fail to start since nomap is not suppored in rootful mode"
+ is "${output}" "Error: nomap is only supported in rootless mode" "Container should fail to start since nomap is not supported in rootful mode"
fi
}
@@ -135,6 +135,6 @@ EOF
is "${output}" "$user" "Container should run as the current user"
else
run_podman 125 run --rm --userns=keep-id $IMAGE id -u
- is "${output}" "Error: keep-id is only supported in rootless mode" "Container should fail to start since keep-id is not suppored in rootful mode"
+ is "${output}" "Error: keep-id is only supported in rootless mode" "Container should fail to start since keep-id is not supported in rootful mode"
fi
}
diff --git a/test/system/200-pod.bats b/test/system/200-pod.bats
index b93f3f92f..7b7f5e8bb 100644
--- a/test/system/200-pod.bats
+++ b/test/system/200-pod.bats
@@ -322,7 +322,7 @@ EOF
is "$output" "" "output from pod create should be empty"
run_podman 125 pod create --infra-name "$infra_name"
- assert "$output" =~ "^Error: .*: the container name \"$infra_name\" is already in use by .* You have to remove that container to be able to reuse that name.: that name is already in use" \
+ assert "$output" =~ "^Error: .*: the container name \"$infra_name\" is already in use by .* You have to remove that container to be able to reuse that name: that name is already in use" \
"Trying to create two pods with same infra-name"
run_podman pod rm -f $pod_name
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
index 895d6645a..ca1383823 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
@@ -3,7 +3,7 @@ module github.com/containerd/stargz-snapshotter/estargz
go 1.16
require (
- github.com/klauspost/compress v1.15.1
+ github.com/klauspost/compress v1.15.7
github.com/opencontainers/go-digest v1.0.0
github.com/vbatts/tar-split v0.11.2
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
index 8b44342da..493da9356 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
@@ -1,8 +1,8 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
-github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok=
+github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
index 8f27dfb3e..37448cae0 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -32,6 +32,7 @@ import (
"fmt"
"io"
"os"
+ "path/filepath"
"reflect"
"sort"
"strings"
@@ -1313,6 +1314,18 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
),
wantFailOnLossLess: true,
},
+ {
+ name: "hardlink should be replaced to the destination entry",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", "test"),
+ link("foolink", "foo/foo1"),
+ ),
+ wantNumGz: 4, // dir, foo1 + link, TOC, footer
+ want: checks(
+ mustSameEntry("foo/foo1", "foolink"),
+ ),
+ },
}
for _, tt := range tests {
@@ -1730,6 +1743,60 @@ func hasEntryOwner(entry string, owner owner) stargzCheck {
})
}
+func mustSameEntry(files ...string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ var first *TOCEntry
+ for _, f := range files {
+ if first == nil {
+ var ok bool
+ first, ok = r.Lookup(f)
+ if !ok {
+ t.Errorf("unknown first file on Lookup: %q", f)
+ return
+ }
+ }
+
+ // Test Lookup
+ e, ok := r.Lookup(f)
+ if !ok {
+ t.Errorf("unknown file on Lookup: %q", f)
+ return
+ }
+ if e != first {
+ t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first)
+ return
+ }
+
+ // Test LookupChild
+ pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f)))
+ if !ok {
+ t.Errorf("failed to get parent of %q", f)
+ return
+ }
+ e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f)))
+ if !ok {
+ t.Errorf("failed to get %q as the child of %+v", f, pe)
+ return
+ }
+ if e != first {
+ t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first)
+ return
+ }
+
+ // Test ForeachChild
+ pe.ForeachChild(func(baseName string, e *TOCEntry) bool {
+ if baseName == filepath.Base(filepath.Clean(f)) {
+ if e != first {
+ t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first)
+ return false
+ }
+ }
+ return true
+ })
+ }
+ })
+}
+
func tarOf(s ...tarEntry) []tarEntry { return s }
type tarEntry interface {
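The mustSameEntry helper above asserts that every name of a hardlink resolves to one shared *TOCEntry. A minimal standalone sketch of that invariant, with a plain map standing in for the stargz TOC (the tocEntry type and the names are illustrative, not the estargz API):

package main

import "fmt"

type tocEntry struct{ name string }

func main() {
	target := &tocEntry{name: "foo/foo1"}
	// "foolink" is a hardlink to "foo/foo1"; the TOC replaces the link with
	// its destination entry, so both lookups yield the same pointer.
	toc := map[string]*tocEntry{
		"foo/foo1": target,
		"foolink":  target,
	}
	fmt.Println(toc["foo/foo1"] == toc["foolink"]) // true: one entry, two names
}
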
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
index 384ff7fd7..3bc74463e 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
@@ -159,7 +159,8 @@ type TOCEntry struct {
// NumLink is the number of entry names pointing to this entry.
// Zero means one name references this entry.
- NumLink int
+ // This field is calculated during runtime and not recorded in TOC JSON.
+ NumLink int `json:"-"`
// Xattrs are the extended attribute for the entry.
Xattrs map[string][]byte `json:"xattrs,omitempty"`
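A minimal sketch of what the json:"-" tag added above accomplishes: a field computed at runtime stays out of the marshaled TOC JSON. The entry type here is illustrative only, not the real TOCEntry:

package main

import (
	"encoding/json"
	"fmt"
)

type entry struct {
	Name    string `json:"name"`
	NumLink int    `json:"-"` // calculated at runtime; never serialized
}

func main() {
	b, _ := json.Marshal(entry{Name: "foo/foo1", NumLink: 2})
	fmt.Println(string(b)) // {"name":"foo/foo1"}; NumLink is omitted
}
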
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index 1b25b190c..70b59782a 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -51,7 +51,7 @@ gcp_credentials: ENCRYPTED[ae0bf7370f0b6e446bc61d0865a2c55d3e166b3fab9466eb0393e
timeout_in: 120m
# Default VM to use unless set or modified by task
-gce_instance:
+gce_instance: &standardvm
image_project: "${IMAGE_PROJECT}"
zone: "us-central1-c" # Required by Cirrus for the time being
cpu: 2
@@ -76,6 +76,7 @@ meta_task:
${FEDORA_CACHE_IMAGE_NAME}
${PRIOR_FEDORA_CACHE_IMAGE_NAME}
${UBUNTU_CACHE_IMAGE_NAME}
+ build-push-${IMAGE_SUFFIX}
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
GCPJSON: ENCRYPTED[d3614d6f5cc0e66be89d4252b3365fd84f14eee0259d4eb47e25fc0bc2842c7937f5ee8c882b7e547b4c5ec4b6733b14]
@@ -93,9 +94,8 @@ smoke_task:
gce_instance:
memory: "12Gb"
- # N/B: Skip running this on branches due to multiple bugs in
- # the git-validate tool which are difficult to debug and fix.
- skip: $CIRRUS_PR == ''
+ # Don't bother running on branches (including cron), or for tags.
+ only_if: $CIRRUS_PR != ''
timeout_in: 30m
@@ -111,6 +111,7 @@ smoke_task:
vendor_task:
name: "Test Vendoring"
alias: vendor
+ only_if: &not_multiarch $CIRRUS_CRON != 'multiarch'
env:
CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/buildah"
@@ -134,7 +135,9 @@ vendor_task:
cross_build_task:
name: "Cross Compile"
alias: cross_build
- only_if: &not_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+ only_if: >-
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
+ $CIRRUS_CRON != 'multiarch'
osx_instance:
image: 'big-sur-base'
@@ -154,7 +157,10 @@ cross_build_task:
unit_task:
name: 'Unit tests w/ $STORAGE_DRIVER'
alias: unit
- only_if: *not_docs
+ only_if: &not_build_docs >-
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' &&
+ $CIRRUS_CRON != 'multiarch'
depends_on: &smoke_vendor_cross
- smoke
- vendor
@@ -179,7 +185,7 @@ unit_task:
conformance_task:
name: 'Build Conformance w/ $STORAGE_DRIVER'
alias: conformance
- only_if: *not_docs
+ only_if: *not_build_docs
depends_on: *smoke_vendor_cross
gce_instance:
@@ -200,7 +206,7 @@ conformance_task:
integration_task:
name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER"
alias: integration
- only_if: *not_docs
+ only_if: *not_build_docs
depends_on: *smoke_vendor_cross
matrix:
@@ -255,7 +261,7 @@ integration_task:
integration_rootless_task:
name: "Integration rootless $DISTRO_NV w/ $STORAGE_DRIVER"
alias: integration_rootless
- only_if: *not_docs
+ only_if: *not_build_docs
depends_on: *smoke_vendor_cross
matrix:
@@ -294,7 +300,7 @@ integration_rootless_task:
in_podman_task:
name: "Containerized Integration"
alias: in_podman
- only_if: *not_docs
+ only_if: *not_build_docs
depends_on: *smoke_vendor_cross
env:
@@ -315,6 +321,52 @@ in_podman_task:
<<: *standardlogs
+image_build_task: &image-build
+ name: "Build multi-arch $CTXDIR"
+ alias: image_build
+ # Some of these container images take > 1h to build, limit
+ # this task to a specific Cirrus-Cron entry with this name.
+ only_if: $CIRRUS_CRON == 'multiarch'
+ depends_on:
+ - smoke
+ timeout_in: 120m # emulation is sssllllooooowwww
+ gce_instance:
+ <<: *standardvm
+ image_name: build-push-${IMAGE_SUFFIX}
+ # More muscle required for parallel multi-arch build
+ type: "n2-standard-4"
+ matrix:
+ - env:
+ CTXDIR: contrib/buildahimage/upstream
+ - env:
+ CTXDIR: contrib/buildahimage/testing
+ - env:
+ CTXDIR: contrib/buildahimage/stable
+ env:
+ DISTRO_NV: "${FEDORA_NAME}" # Required for repo cache extraction
+ BUILDAH_USERNAME: ENCRYPTED[70e1d4f026cba5d82fc067944baab10f7c71c64bb6b75fce4eeb5c106694b3bbc8e08f8a1b926d6e03e85cf4e21833bb]
+ BUILDAH_PASSWORD: ENCRYPTED[2dc7f4f623bfc856e1d5030df263b9e48ddab39abacea7a8bc714179c188df15fc0a5bb5d3414a24637d4e39aa51b7b5]
+ CONTAINERS_USERNAME: ENCRYPTED[88cd93c753f78d70e4beb5dbebd4402d682daf45793d7e0fe8b75b358f768e8734aef3f130ffb4ebca9bdea8d220a188]
+ CONTAINERS_PASSWORD: ENCRYPTED[886cf4cc126e50b2fd7f2792235a22bb79e4b81db43f803a6214a38d3fd6c04cd4e64570b562cb32b04e5fbc435404b6]
+ main_script:
+ - source /etc/automation_environment
+ - main.sh $CIRRUS_REPO_CLONE_URL $CTXDIR
+
+
+test_image_build_task:
+ <<: *image-build
+ alias: test_image_build
+ # Allow this to run inside a PR w/ [CI:BUILD] only.
+ only_if: $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*'
+ # This takes a LONG time, only run when requested. N/B: Any task
+ # made to depend on this one will block FOREVER unless triggered.
+ # DO NOT ADD THIS TASK AS DEPENDENCY FOR `success_task`.
+ trigger_type: manual
+ # Overwrite all 'env', don't push anything, just do the build.
+ env:
+ DRYRUN: 1
+
+
# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
# of this task.
@@ -331,6 +383,7 @@ success_task:
- cross_build
- integration
- in_podman
+ - image_build
container:
image: "quay.io/libpod/alpine:latest"
diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile
index c9121cc87..b40462eea 100644
--- a/vendor/github.com/containers/buildah/Makefile
+++ b/vendor/github.com/containers/buildah/Makefile
@@ -78,8 +78,9 @@ ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dis
LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS))
DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS))
WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS)))
+FREEBSD_CROSS_TARGETS := $(filter bin/buildah.freebsd.%,$(ALL_CROSS_TARGETS))
.PHONY: cross
-cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS)
+cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS) $(FREEBSD_CROSS_TARGETS)
bin/buildah.%:
mkdir -p ./bin
@@ -114,7 +115,6 @@ validate: install.tools
./tests/validate/whitespace.sh
./hack/xref-helpmsgs-manpages
./tests/validate/pr-should-include-tests
- ./tests/validate/buildahimages-are-sane
.PHONY: install.tools
install.tools:
@@ -189,3 +189,9 @@ vendor:
.PHONY: lint
lint: install.tools
./tests/tools/build/golangci-lint run $(LINTFLAGS)
+
+# CAUTION: This is not a replacement for RPMs provided by your distro.
+# Only intended to build and test the latest unreleased changes.
+.PHONY: rpm
+rpm:
+ rpkg local
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index 8aa53a292..1f820ea55 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -2,6 +2,7 @@ package buildah
import (
"archive/tar"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -24,7 +25,6 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/opencontainers/runc/libcontainer/userns"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -88,6 +88,11 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
return err
}
defer response.Body.Close()
+
+ if response.StatusCode < http.StatusOK || response.StatusCode >= http.StatusBadRequest {
+ return fmt.Errorf("invalid response status %d", response.StatusCode)
+ }
+
// Figure out what to name the new content.
name := renameTarget
if name == "" {
@@ -100,7 +105,7 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
if lastModified != "" {
d, err := time.Parse(time.RFC1123, lastModified)
if err != nil {
- return errors.Wrapf(err, "error parsing last-modified time")
+ return fmt.Errorf("error parsing last-modified time: %w", err)
}
date = d
}
@@ -112,17 +117,17 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
// we can figure out how much content there is.
f, err := ioutil.TempFile(mountpoint, "download")
if err != nil {
- return errors.Wrapf(err, "error creating temporary file to hold %q", src)
+ return fmt.Errorf("error creating temporary file to hold %q: %w", src, err)
}
defer os.Remove(f.Name())
defer f.Close()
size, err = io.Copy(f, response.Body)
if err != nil {
- return errors.Wrapf(err, "error writing %q to temporary file %q", src, f.Name())
+ return fmt.Errorf("error writing %q to temporary file %q: %w", src, f.Name(), err)
}
_, err = f.Seek(0, io.SeekStart)
if err != nil {
- return errors.Wrapf(err, "error setting up to read %q from temporary file %q", src, f.Name())
+ return fmt.Errorf("error setting up to read %q from temporary file %q: %w", src, f.Name(), err)
}
responseBody = f
}
@@ -150,10 +155,14 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
}
err = tw.WriteHeader(&hdr)
if err != nil {
- return errors.Wrapf(err, "error writing header")
+ return fmt.Errorf("error writing header: %w", err)
}
- _, err = io.Copy(tw, responseBody)
- return errors.Wrapf(err, "error writing content from %q to tar stream", src)
+
+ if _, err := io.Copy(tw, responseBody); err != nil {
+ return fmt.Errorf("error writing content from %q to tar stream: %w", src, err)
+ }
+
+ return nil
}
// includeDirectoryAnyway returns true if "path" is a prefix for an exception
@@ -199,13 +208,13 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
contextDir = string(os.PathSeparator)
currentDir, err = os.Getwd()
if err != nil {
- return errors.Wrapf(err, "error determining current working directory")
+ return fmt.Errorf("error determining current working directory: %w", err)
}
} else {
if !filepath.IsAbs(options.ContextDir) {
contextDir, err = filepath.Abs(options.ContextDir)
if err != nil {
- return errors.Wrapf(err, "error converting context directory path %q to an absolute path", options.ContextDir)
+ return fmt.Errorf("error converting context directory path %q to an absolute path: %w", options.ContextDir, err)
}
}
}
@@ -233,7 +242,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
localSourceStats, err = copier.Stat(contextDir, contextDir, statOptions, localSources)
if err != nil {
- return errors.Wrapf(err, "checking on sources under %q", contextDir)
+ return fmt.Errorf("checking on sources under %q: %w", contextDir, err)
}
}
numLocalSourceItems := 0
@@ -247,15 +256,15 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
errorText = fmt.Sprintf("possible escaping context directory error: %s", errorText)
}
- return errors.Errorf("checking on sources under %q: %v", contextDir, errorText)
+ return fmt.Errorf("checking on sources under %q: %v", contextDir, errorText)
}
if len(localSourceStat.Globbed) == 0 {
- return errors.Wrapf(syscall.ENOENT, "checking source under %q: no glob matches", contextDir)
+ return fmt.Errorf("checking source under %q: no glob matches: %w", contextDir, syscall.ENOENT)
}
numLocalSourceItems += len(localSourceStat.Globbed)
}
if numLocalSourceItems+len(remoteSources) == 0 {
- return errors.Wrapf(syscall.ENOENT, "no sources %v found", sources)
+ return fmt.Errorf("no sources %v found: %w", sources, syscall.ENOENT)
}
// Find out which user (and group) the destination should belong to.
@@ -264,14 +273,14 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if options.Chown != "" {
userUID, userGID, err = b.userForCopy(mountPoint, options.Chown)
if err != nil {
- return errors.Wrapf(err, "error looking up UID/GID for %q", options.Chown)
+ return fmt.Errorf("error looking up UID/GID for %q: %w", options.Chown, err)
}
}
var chmodDirsFiles *os.FileMode
if options.Chmod != "" {
p, err := strconv.ParseUint(options.Chmod, 8, 32)
if err != nil {
- return errors.Wrapf(err, "error parsing chmod %q", options.Chmod)
+ return fmt.Errorf("error parsing chmod %q: %w", options.Chmod, err)
}
perm := os.FileMode(p)
chmodDirsFiles = &perm
@@ -323,7 +332,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
destStats, err := copier.Stat(mountPoint, filepath.Join(mountPoint, b.WorkDir()), statOptions, []string{extractDirectory})
if err != nil {
- return errors.Wrapf(err, "error checking on destination %v", extractDirectory)
+ return fmt.Errorf("error checking on destination %v: %w", extractDirectory, err)
}
if (len(destStats) == 0 || len(destStats[0].Globbed) == 0) && !destMustBeDirectory && destCanBeFile {
// destination doesn't exist - extract to parent and rename the incoming file to the destination's name
@@ -339,7 +348,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular {
if destMustBeDirectory {
- return errors.Errorf("destination %v already exists but is not a directory", destination)
+ return fmt.Errorf("destination %v already exists but is not a directory", destination)
}
// destination exists - it's a file, we need to extract to parent and rename the incoming file to the destination's name
renameTarget = filepath.Base(extractDirectory)
@@ -348,7 +357,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
pm, err := fileutils.NewPatternMatcher(options.Excludes)
if err != nil {
- return errors.Wrapf(err, "error processing excludes list %v", options.Excludes)
+ return fmt.Errorf("error processing excludes list %v: %w", options.Excludes, err)
}
// Make sure that, if it's a symlink, we'll chroot to the target of the link;
@@ -356,7 +365,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
evalOptions := copier.EvalOptions{}
evaluated, err := copier.Eval(mountPoint, extractDirectory, evalOptions)
if err != nil {
- return errors.Wrapf(err, "error checking on destination %v", extractDirectory)
+ return fmt.Errorf("error checking on destination %v: %w", extractDirectory, err)
}
extractDirectory = evaluated
@@ -374,7 +383,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
ChownNew: chownDirs,
}
if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
- return errors.Wrapf(err, "error ensuring target directory exists")
+ return fmt.Errorf("error ensuring target directory exists: %w", err)
}
// Copy each source in turn.
@@ -418,10 +427,10 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}()
wg.Wait()
if getErr != nil {
- getErr = errors.Wrapf(getErr, "error reading %q", src)
+ getErr = fmt.Errorf("error reading %q: %w", src, getErr)
}
if putErr != nil {
- putErr = errors.Wrapf(putErr, "error storing %q", src)
+ putErr = fmt.Errorf("error storing %q: %w", src, putErr)
}
multiErr = multierror.Append(getErr, putErr)
if multiErr != nil && multiErr.ErrorOrNil() != nil {
@@ -450,16 +459,16 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
for _, glob := range localSourceStat.Globbed {
rel, err := filepath.Rel(contextDir, glob)
if err != nil {
- return errors.Wrapf(err, "error computing path of %q relative to %q", glob, contextDir)
+ return fmt.Errorf("error computing path of %q relative to %q: %w", glob, contextDir, err)
}
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
- return errors.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
+ return fmt.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
}
// Check for dockerignore-style exclusion of this item.
if rel != "." {
excluded, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
if err != nil {
- return errors.Wrapf(err, "error checking if %q(%q) is excluded", glob, rel)
+ return fmt.Errorf("error checking if %q(%q) is excluded: %w", glob, rel, err)
}
if excluded {
// non-directories that are excluded are excluded, no question, but
@@ -515,7 +524,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer)
closeErr = writer.Close()
if renameTarget != "" && renamedItems > 1 {
- renameErr = errors.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)
+ renameErr = fmt.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)
}
wg.Done()
}()
@@ -553,16 +562,16 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}()
wg.Wait()
if getErr != nil {
- getErr = errors.Wrapf(getErr, "error reading %q", src)
+ getErr = fmt.Errorf("error reading %q: %w", src, getErr)
}
if closeErr != nil {
- closeErr = errors.Wrapf(closeErr, "error closing %q", src)
+ closeErr = fmt.Errorf("error closing %q: %w", src, closeErr)
}
if renameErr != nil {
- renameErr = errors.Wrapf(renameErr, "error renaming %q", src)
+ renameErr = fmt.Errorf("error renaming %q: %w", src, renameErr)
}
if putErr != nil {
- putErr = errors.Wrapf(putErr, "error storing %q", src)
+ putErr = fmt.Errorf("error storing %q: %w", src, putErr)
}
multiErr = multierror.Append(getErr, closeErr, renameErr, putErr)
if multiErr != nil && multiErr.ErrorOrNil() != nil {
@@ -577,7 +586,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if options.IgnoreFile != "" {
excludesFile = " using " + options.IgnoreFile
}
- return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered out%s)", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile)
+ return fmt.Errorf("no items matching glob %q copied (%d filtered out%s): %w", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile, syscall.ENOENT)
}
}
return nil
@@ -599,7 +608,7 @@ func (b *Builder) userForRun(mountPoint string, userspec string) (specs.User, st
if !strings.Contains(userspec, ":") {
groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
if err2 != nil {
- if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
+ if !errors.Is(err2, chrootuser.ErrNoSuchUser) && err == nil {
err = err2
}
} else {
@@ -629,7 +638,7 @@ func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint3
// If userspec did not specify any values for user or group, then fail
if user == "" && group == "" {
- return 0, 0, errors.Errorf("can't find uid for user %s", userspec)
+ return 0, 0, fmt.Errorf("can't find uid for user %s", userspec)
}
// If userspec specifies values for user or group, check for numeric values
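A minimal sketch of the error-handling migration running through add.go above: fmt.Errorf with the %w verb replaces errors.Wrapf from github.com/pkg/errors, and errors.Is replaces errors.Cause comparisons, while a wrapped sentinel such as syscall.ENOENT stays matchable (findSources is a stand-in, not a buildah function):

package main

import (
	"errors"
	"fmt"
	"syscall"
)

func findSources() error {
	// was: errors.Wrapf(syscall.ENOENT, "no sources %v found", sources)
	return fmt.Errorf("no sources found: %w", syscall.ENOENT)
}

func main() {
	// The cause survives the wrapping, so callers can still test for it.
	fmt.Println(errors.Is(findSources(), syscall.ENOENT)) // true
}
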
diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go
index 83ca2933f..8e5ad458c 100644
--- a/vendor/github.com/containers/buildah/bind/mount.go
+++ b/vendor/github.com/containers/buildah/bind/mount.go
@@ -4,6 +4,7 @@
package bind
import (
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -14,7 +15,6 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -29,28 +29,28 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
// We expect a root directory to be defined.
if spec.Root == nil {
- return nil, errors.Errorf("configuration has no root filesystem?")
+ return nil, errors.New("configuration has no root filesystem?")
}
rootPath := spec.Root.Path
// Create a new mount namespace in which to do the things we're doing.
if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
- return nil, errors.Wrapf(err, "error creating new mount namespace for %v", spec.Process.Args)
+ return nil, fmt.Errorf("error creating new mount namespace for %v: %w", spec.Process.Args, err)
}
// Make all of our mounts private to our namespace.
if err := mount.MakeRPrivate("/"); err != nil {
- return nil, errors.Wrapf(err, "error making mounts private to mount namespace for %v", spec.Process.Args)
+ return nil, fmt.Errorf("error making mounts private to mount namespace for %v: %w", spec.Process.Args, err)
}
// Make sure the bundle directory is searchable. We created it with
// TempDir(), so it should have started with permissions set to 0700.
info, err := os.Stat(bundlePath)
if err != nil {
- return nil, errors.Wrapf(err, "error checking permissions on %q", bundlePath)
+ return nil, fmt.Errorf("error checking permissions on %q: %w", bundlePath, err)
}
if err = os.Chmod(bundlePath, info.Mode()|0111); err != nil {
- return nil, errors.Wrapf(err, "error loosening permissions on %q", bundlePath)
+ return nil, fmt.Errorf("error loosening permissions on %q: %w", bundlePath, err)
}
// Figure out who needs to be able to reach these bind mounts in order
@@ -117,23 +117,23 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
// access.
mnt := filepath.Join(bundlePath, "mnt")
if err = idtools.MkdirAndChown(mnt, 0100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil {
- return unmountAll, errors.Wrapf(err, "error creating %q owned by the container's root user", mnt)
+ return unmountAll, fmt.Errorf("error creating %q owned by the container's root user: %w", mnt, err)
}
// Make that directory private, and add it to the list of locations we
// unmount at cleanup time.
if err = mount.MakeRPrivate(mnt); err != nil {
- return unmountAll, errors.Wrapf(err, "error marking filesystem at %q as private", mnt)
+ return unmountAll, fmt.Errorf("error marking filesystem at %q as private: %w", mnt, err)
}
unmount = append([]string{mnt}, unmount...)
// Create a bind mount for the root filesystem and add it to the list.
rootfs := filepath.Join(mnt, "rootfs")
if err = os.Mkdir(rootfs, 0000); err != nil {
- return unmountAll, errors.Wrapf(err, "error creating directory %q", rootfs)
+ return unmountAll, fmt.Errorf("error creating directory %q: %w", rootfs, err)
}
if err = unix.Mount(rootPath, rootfs, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil {
- return unmountAll, errors.Wrapf(err, "error bind mounting root filesystem from %q to %q", rootPath, rootfs)
+ return unmountAll, fmt.Errorf("error bind mounting root filesystem from %q to %q: %w", rootPath, rootfs, err)
}
logrus.Debugf("bind mounted %q to %q", rootPath, rootfs)
unmount = append([]string{rootfs}, unmount...)
@@ -154,28 +154,28 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
logrus.Warnf("couldn't find %q on host to bind mount into container", spec.Mounts[i].Source)
continue
}
- return unmountAll, errors.Wrapf(err, "error checking if %q is a directory", spec.Mounts[i].Source)
+ return unmountAll, fmt.Errorf("error checking if %q is a directory: %w", spec.Mounts[i].Source, err)
}
stage := filepath.Join(mnt, fmt.Sprintf("buildah-bind-target-%d", i))
if info.IsDir() {
// If the source is a directory, make one to use as the
// mount target.
if err = os.Mkdir(stage, 0000); err != nil {
- return unmountAll, errors.Wrapf(err, "error creating directory %q", stage)
+ return unmountAll, fmt.Errorf("error creating directory %q: %w", stage, err)
}
} else {
// If the source is not a directory, create an empty
// file to use as the mount target.
file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0000)
if err != nil {
- return unmountAll, errors.Wrapf(err, "error creating file %q", stage)
+ return unmountAll, fmt.Errorf("error creating file %q: %w", stage, err)
}
file.Close()
}
// Bind mount the source from wherever it is to a place where
// we know the runtime helper will be able to get to it...
if err = unix.Mount(spec.Mounts[i].Source, stage, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil {
- return unmountAll, errors.Wrapf(err, "error bind mounting bind object from %q to %q", spec.Mounts[i].Source, stage)
+ return unmountAll, fmt.Errorf("error bind mounting bind object from %q to %q: %w", spec.Mounts[i].Source, stage, err)
}
logrus.Debugf("bind mounted %q to %q", spec.Mounts[i].Source, stage)
spec.Mounts[i].Source = stage
@@ -209,7 +209,7 @@ func leaveBindMountAlone(mount specs.Mount) bool {
func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
mounts, err := mount.GetMounts()
if err != nil {
- return errors.Wrapf(err, "error retrieving list of mounts")
+ return fmt.Errorf("error retrieving list of mounts: %w", err)
}
// getChildren returns the list of mount IDs that hang off of the
// specified ID.
@@ -255,7 +255,10 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
// find the top of the tree we're unmounting
top := getMountByPoint(mountpoint)
if top == nil {
- return errors.Wrapf(err, "%q is not mounted", mountpoint)
+ if err != nil {
+ return fmt.Errorf("%q is not mounted: %w", mountpoint, err)
+ }
+ return nil
}
// add all of the mounts that are hanging off of it
tree := getTree(top.ID)
@@ -270,7 +273,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
logrus.Debugf("mountpoint %q is not present(?), skipping", mount.Mountpoint)
continue
}
- return errors.Wrapf(err, "error checking if %q is mounted", mount.Mountpoint)
+ return fmt.Errorf("error checking if %q is mounted: %w", mount.Mountpoint, err)
}
if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { //nolint:unconvert // (required for some OS/arch combinations)
logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint)
@@ -293,7 +296,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
// if we're also supposed to remove this thing, do that, too
if cutil.StringInSlice(mount.Mountpoint, mountpointsToRemove) {
if err := os.Remove(mount.Mountpoint); err != nil {
- return errors.Wrapf(err, "error removing %q", mount.Mountpoint)
+ return fmt.Errorf("error removing %q: %w", mount.Mountpoint, err)
}
}
}
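
Every errors.Wrapf in this file becomes fmt.Errorf with the %w verb, and the UnmountMountpoints hunk above is the one place where the conversion is not mechanical. github.com/pkg/errors documents Wrapf as returning nil when its err argument is nil, so the old return errors.Wrapf(err, ...) quietly returned no error when the mountpoint was simply absent; fmt.Errorf always returns a non-nil error, hence the explicit nil guard the patch adds. A minimal sketch of the difference (the path is illustrative; runnable against the vendored pkg/errors):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	// err is nil here, as in UnmountMountpoints when mount.GetMounts
	// succeeded but the requested path simply was not mounted.
	var err error

	// Wrapf's documented contract is nil-in, nil-out, so the pre-patch
	// code returned nil (no error) from this branch.
	fmt.Println(errors.Wrapf(err, "%q is not mounted", "/mnt") == nil) // true

	// fmt.Errorf always constructs a non-nil error, even with a nil %w
	// operand, which is why the patch guards on err before wrapping.
	fmt.Println(fmt.Errorf("%q is not mounted: %w", "/mnt", err) == nil) // false
}
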
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 2d81a8c5e..5e6397f7f 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -19,7 +19,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/ioutils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -408,10 +407,10 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) {
}
b := &Builder{}
if err = json.Unmarshal(buildstate, &b); err != nil {
- return nil, errors.Wrapf(err, "error parsing %q, read from %q", string(buildstate), filepath.Join(cdir, stateFile))
+ return nil, fmt.Errorf("error parsing %q, read from %q: %w", string(buildstate), filepath.Join(cdir, stateFile), err)
}
if b.Type != containerType {
- return nil, errors.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type)
+ return nil, fmt.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type)
}
netInt, err := getNetworkInterface(store, b.CNIConfigDir, b.CNIPluginPath)
@@ -520,7 +519,7 @@ func (b *Builder) Save() error {
return err
}
if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600); err != nil {
- return errors.Wrapf(err, "error saving builder state to %q", filepath.Join(cdir, stateFile))
+ return fmt.Errorf("error saving builder state to %q: %w", filepath.Join(cdir, stateFile), err)
}
return nil
}
diff --git a/vendor/github.com/containers/buildah/buildah.spec.rpkg b/vendor/github.com/containers/buildah/buildah.spec.rpkg
new file mode 100644
index 000000000..8336ab26c
--- /dev/null
+++ b/vendor/github.com/containers/buildah/buildah.spec.rpkg
@@ -0,0 +1,168 @@
+# For automatic rebuilds in COPR
+
+# The following tag is to get correct syntax highlighting for this file in the vim text editor
+# vim: syntax=spec
+
+# Any additional comments should go below this line or else syntax highlighting
+# may not work.
+
+# CAUTION: This is not a replacement for RPMs provided by your distro.
+# Only intended to build and test the latest unreleased changes.
+
+%global with_debug 1
+
+%if 0%{?with_debug}
+%global _find_debuginfo_dwz_opts %{nil}
+%global _dwz_low_mem_die_limit 0
+%else
+%global debug_package %{nil}
+%endif
+
+%if ! 0%{?gobuild:1}
+%define gobuild(o:) GO111MODULE=off go build -buildmode pie -compiler gc -tags="rpm_crashtraceback ${BUILDTAGS:-}" -ldflags "${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '-Wl,-z,relro -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld '" -a -v -x %{?**};
+%endif
+
+%global provider github
+%global provider_tld com
+%global project containers
+%global repo %{name}
+# https://github.com/containers/%%{name}
+%global import_path %{provider}.%{provider_tld}/%{project}/%{repo}
+%global git0 https://%{import_path}
+
+Name: {{{ git_dir_name }}}
+Epoch: 101
+Version: {{{ git_dir_version }}}
+Release: 1%{?dist}
+Summary: Manage Pods, Containers and Container Images
+License: ASL 2.0
+URL: https://github.com/containers/buildah
+VCS: {{{ git_dir_vcs }}}
+Source: {{{ git_dir_pack }}}
+BuildRequires: device-mapper-devel
+BuildRequires: git-core
+BuildRequires: golang
+BuildRequires: glib2-devel
+BuildRequires: glibc-static
+BuildRequires: go-md2man
+%if 0%{?fedora} || 0%{?rhel} >= 9
+BuildRequires: go-rpm-macros
+%endif
+BuildRequires: gpgme-devel
+BuildRequires: libassuan-devel
+BuildRequires: make
+BuildRequires: ostree-devel
+BuildRequires: shadow-utils-subid-devel
+%if 0%{?fedora} && ! 0%{?rhel}
+BuildRequires: btrfs-progs-devel
+%endif
+%if 0%{?fedora} <= 35
+Requires: containers-common >= 4:1-39
+%else
+Requires: containers-common >= 4:1-46
+%endif
+%if 0%{?rhel}
+BuildRequires: libseccomp-devel
+%else
+BuildRequires: libseccomp-static
+%endif
+Requires: libseccomp
+Suggests: cpp
+Suggests: qemu-user-static
+
+%description
+The %{name} package provides a command line tool which can be used to
+* create a working container from scratch
+or
+* create a working container from an image as a starting point
+* mount/umount a working container's root file system for manipulation
+* save container's root file system layer to create a new image
+* delete a working container or an image.
+
+%package tests
+Summary: Tests for %{name}
+Requires: %{name} = %{version}-%{release}
+Requires: bats
+Requires: bzip2
+Requires: podman
+Requires: golang
+Requires: jq
+Requires: httpd-tools
+Requires: openssl
+Requires: nmap-ncat
+Requires: git-daemon
+
+%description tests
+%{summary}
+
+This package contains system tests for %{name}
+
+%prep
+{{{ git_dir_setup_macro }}}
+
+%build
+%set_build_flags
+export GO111MODULE=off
+export GOPATH=$(pwd)/_build:$(pwd)
+export CGO_CFLAGS=$CFLAGS
+# These extra flags present in $CFLAGS have been skipped for now as they break the build
+CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-flto=auto//g')
+CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-Wp,-D_GLIBCXX_ASSERTIONS//g')
+CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-specs=\/usr\/lib\/rpm\/redhat\/redhat-annobin-cc1//g')
+
+%ifarch x86_64
+export CGO_CFLAGS+=" -m64 -mtune=generic -fcf-protection=full"
+%endif
+mkdir _build
+pushd _build
+mkdir -p src/%{provider}.%{provider_tld}/%{project}
+ln -s $(dirs +1 -l) src/%{import_path}
+popd
+
+mv vendor src
+
+export CNI_VERSION=`grep '^# github.com/containernetworking/cni ' src/modules.txt | sed 's,.* ,,'`
+export LDFLAGS="-X main.buildInfo=`date +%s` -X main.cniVersion=${CNI_VERSION}"
+
+export BUILDTAGS='seccomp libsubid selinux'
+%if 0%{?rhel}
+export BUILDTAGS="$BUILDTAGS exclude_graphdriver_btrfs btrfs_noversion"
+%endif
+
+%gobuild -o bin/%{name} %{import_path}/cmd/%{name}
+%gobuild -o bin/imgtype %{import_path}/tests/imgtype
+%gobuild -o bin/copy %{import_path}/tests/copy
+GOMD2MAN=go-md2man %{__make} -C docs
+
+# This will copy the files generated by the `make` command above into
+# the installable rpm package.
+%install
+export GOPATH=$(pwd)/_build:$(pwd):%{gopath}
+make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions
+make DESTDIR=%{buildroot} PREFIX=%{_prefix} -C docs install
+
+install -d -p %{buildroot}/%{_datadir}/%{name}/test/system
+cp -pav tests/. %{buildroot}/%{_datadir}/%{name}/test/system
+cp bin/imgtype %{buildroot}/%{_bindir}/%{name}-imgtype
+cp bin/copy %{buildroot}/%{_bindir}/%{name}-copy
+
+rm -f %{buildroot}%{_mandir}/man5/{Containerfile.5*,containerignore.5*}
+
+
+%files
+%license LICENSE
+%doc README.md
+%{_bindir}/%{name}
+%{_mandir}/man1/%{name}*
+%dir %{_datadir}/bash-completion
+%dir %{_datadir}/bash-completion/completions
+%{_datadir}/bash-completion/completions/%{name}
+
+%files tests
+%license LICENSE
+%{_bindir}/%{name}-imgtype
+%{_bindir}/%{name}-copy
+%{_datadir}/%{name}/test
+
+%changelog
+{{{ git_dir_changelog }}}
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index 9ff7c933d..809a70131 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package chroot
@@ -29,7 +30,6 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runc/libcontainer/apparmor"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/syndtr/gocapability/capability"
"golang.org/x/sys/unix"
@@ -109,7 +109,7 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade
return err
}
if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
- return errors.Wrapf(err, "error storing runtime configuration")
+ return fmt.Errorf("error storing runtime configuration: %w", err)
}
logrus.Debugf("config = %v", string(specbytes))
@@ -127,14 +127,14 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade
// Create a pipe for passing configuration down to the next process.
preader, pwriter, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "error creating configuration pipe")
+ return fmt.Errorf("error creating configuration pipe: %w", err)
}
config, conferr := json.Marshal(runUsingChrootSubprocOptions{
Spec: spec,
BundlePath: bundlePath,
})
if conferr != nil {
- return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingChrootCommand)
+ return fmt.Errorf("error encoding configuration for %q: %w", runUsingChrootCommand, conferr)
}
// Set our terminal's mode to raw, to pass handling of special
@@ -551,7 +551,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
// Create a pipe for passing configuration down to the next process.
preader, pwriter, err := os.Pipe()
if err != nil {
- return 1, errors.Wrapf(err, "error creating configuration pipe")
+ return 1, fmt.Errorf("error creating configuration pipe: %w", err)
}
config, conferr := json.Marshal(runUsingChrootExecSubprocOptions{
Spec: spec,
@@ -921,7 +921,7 @@ func setApparmorProfile(spec *specs.Spec) error {
return nil
}
if err := apparmor.ApplyProfile(spec.Process.ApparmorProfile); err != nil {
- return errors.Wrapf(err, "error setting apparmor profile to %q", spec.Process.ApparmorProfile)
+ return fmt.Errorf("error setting apparmor profile to %q: %w", spec.Process.ApparmorProfile, err)
}
return nil
}
@@ -930,14 +930,14 @@ func setApparmorProfile(spec *specs.Spec) error {
func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
currentCaps, err := capability.NewPid2(0)
if err != nil {
- return errors.Wrapf(err, "error reading capabilities of current process")
+ return fmt.Errorf("error reading capabilities of current process: %w", err)
}
if err := currentCaps.Load(); err != nil {
- return errors.Wrapf(err, "error loading capabilities")
+ return fmt.Errorf("error loading capabilities: %w", err)
}
caps, err := capability.NewPid2(0)
if err != nil {
- return errors.Wrapf(err, "error reading capabilities of current process")
+ return fmt.Errorf("error reading capabilities of current process: %w", err)
}
capMap := map[capability.CapType][]string{
capability.BOUNDING: spec.Process.Capabilities.Bounding,
@@ -958,7 +958,7 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
}
}
if cap == noCap {
- return errors.Errorf("error mapping capability %q to a number", capToSet)
+ return fmt.Errorf("error mapping capability %q to a number", capToSet)
}
caps.Set(capType, cap)
}
@@ -971,7 +971,7 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
}
}
if cap == noCap {
- return errors.Errorf("error mapping capability %q to a number", capToSet)
+ return fmt.Errorf("error mapping capability %q to a number", capToSet)
}
if currentCaps.Get(capType, cap) {
caps.Set(capType, cap)
@@ -979,7 +979,7 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
}
}
if err = caps.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS); err != nil {
- return errors.Wrapf(err, "error setting capabilities")
+ return fmt.Errorf("error setting capabilities: %w", err)
}
return nil
}
@@ -994,7 +994,7 @@ func parseRlimits(spec *specs.Spec) (map[int]unix.Rlimit, error) {
for _, limit := range spec.Process.Rlimits {
resource, recognized := rlimitsMap[strings.ToUpper(limit.Type)]
if !recognized {
- return nil, errors.Errorf("error parsing limit type %q", limit.Type)
+ return nil, fmt.Errorf("error parsing limit type %q", limit.Type)
}
parsed[resource] = unix.Rlimit{Cur: limit.Soft, Max: limit.Hard}
}
@@ -1011,7 +1011,7 @@ func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error {
for resource, desired := range limits {
var current unix.Rlimit
if err := unix.Getrlimit(resource, &current); err != nil {
- return errors.Wrapf(err, "error reading %q limit", rlimitsReverseMap[resource])
+ return fmt.Errorf("error reading %q limit: %w", rlimitsReverseMap[resource], err)
}
if desired.Max > current.Max && onlyLower {
// this would raise a hard limit, and we're only here to lower them
@@ -1022,7 +1022,7 @@ func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error {
continue
}
if err := unix.Setrlimit(resource, &desired); err != nil {
- return errors.Wrapf(err, "error setting %q limit to soft=%d,hard=%d (was soft=%d,hard=%d)", rlimitsReverseMap[resource], desired.Cur, desired.Max, current.Cur, current.Max)
+ return fmt.Errorf("error setting %q limit to soft=%d,hard=%d (was soft=%d,hard=%d): %w", rlimitsReverseMap[resource], desired.Cur, desired.Max, current.Cur, current.Max, err)
}
}
return nil
@@ -1032,11 +1032,11 @@ func makeReadOnly(mntpoint string, flags uintptr) error {
var fs unix.Statfs_t
// Make sure it's read-only.
if err := unix.Statfs(mntpoint, &fs); err != nil {
- return errors.Wrapf(err, "error checking if directory %q was bound read-only", mntpoint)
+ return fmt.Errorf("error checking if directory %q was bound read-only: %w", mntpoint, err)
}
if fs.Flags&unix.ST_RDONLY == 0 {
if err := unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_REMOUNT, ""); err != nil {
- return errors.Wrapf(err, "error remounting %s in mount namespace read-only", mntpoint)
+ return fmt.Errorf("error remounting %s in mount namespace read-only: %w", mntpoint, err)
}
}
return nil
@@ -1097,16 +1097,16 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
}
if err != nil {
- return undoBinds, errors.Wrapf(err, "error bind mounting /dev from host into mount namespace")
+ return undoBinds, fmt.Errorf("error bind mounting /dev from host into mount namespace: %w", err)
}
}
// Make sure it's read-only.
if err = unix.Statfs(subDev, &fs); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subDev)
+ return undoBinds, fmt.Errorf("error checking if directory %q was bound read-only: %w", subDev, err)
}
if fs.Flags&unix.ST_RDONLY == 0 {
if err := unix.Mount(subDev, subDev, "bind", devFlags|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error remounting /dev in mount namespace read-only")
+ return undoBinds, fmt.Errorf("error remounting /dev in mount namespace read-only: %w", err)
}
}
logrus.Debugf("bind mounted %q to %q", "/dev", filepath.Join(spec.Root.Path, "/dev"))
@@ -1121,7 +1121,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
}
if err != nil {
- return undoBinds, errors.Wrapf(err, "error bind mounting /proc from host into mount namespace")
+ return undoBinds, fmt.Errorf("error bind mounting /proc from host into mount namespace: %w", err)
}
}
logrus.Debugf("bind mounted %q to %q", "/proc", filepath.Join(spec.Root.Path, "/proc"))
@@ -1136,7 +1136,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
}
if err != nil {
- return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace")
+ return undoBinds, fmt.Errorf("error bind mounting /sys from host into mount namespace: %w", err)
}
}
if err := makeReadOnly(subSys, sysFlags); err != nil {
@@ -1194,14 +1194,14 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
case "bind":
srcinfo, err = os.Stat(m.Source)
if err != nil {
- return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", m.Source)
+ return undoBinds, fmt.Errorf("error examining %q for mounting in mount namespace: %w", m.Source, err)
}
case "overlay":
fallthrough
case "tmpfs":
srcinfo, err = os.Stat("/")
if err != nil {
- return undoBinds, errors.Wrapf(err, "error examining / to use as a template for a %s", m.Type)
+ return undoBinds, fmt.Errorf("error examining / to use as a template for a %s: %w", m.Type, err)
}
}
target := filepath.Join(spec.Root.Path, m.Destination)
@@ -1211,7 +1211,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
if err == nil && stat != nil && (stat.Mode()&os.ModeSymlink != 0) {
target, err = copier.Eval(spec.Root.Path, m.Destination, copier.EvalOptions{})
if err != nil {
- return nil, errors.Wrapf(err, "evaluating symlink %q", target)
+ return nil, fmt.Errorf("evaluating symlink %q: %w", target, err)
}
// Stat the destination of the evaluated symlink
_, err = os.Stat(target)
@@ -1219,20 +1219,20 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
if err != nil {
// If the target can't be stat()ted, check the error.
if !os.IsNotExist(err) {
- return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", target)
+ return undoBinds, fmt.Errorf("error examining %q for mounting in mount namespace: %w", target, err)
}
// The target isn't there yet, so create it.
if srcinfo.IsDir() {
if err = os.MkdirAll(target, 0755); err != nil {
- return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target)
+ return undoBinds, fmt.Errorf("error creating mountpoint %q in mount namespace: %w", target, err)
}
} else {
if err = os.MkdirAll(filepath.Dir(target), 0755); err != nil {
- return undoBinds, errors.Wrapf(err, "error ensuring parent of mountpoint %q (%q) is present in mount namespace", target, filepath.Dir(target))
+ return undoBinds, fmt.Errorf("error ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err)
}
var file *os.File
if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0755); err != nil {
- return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target)
+ return undoBinds, fmt.Errorf("error creating mountpoint %q in mount namespace: %w", target, err)
}
file.Close()
}
@@ -1272,28 +1272,28 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// Do the bind mount.
logrus.Debugf("bind mounting %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination))
if err := unix.Mount(m.Source, target, "", requestFlags, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error bind mounting %q from host to %q in mount namespace (%q)", m.Source, m.Destination, target)
+ return undoBinds, fmt.Errorf("error bind mounting %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err)
}
logrus.Debugf("bind mounted %q to %q", m.Source, target)
case "tmpfs":
// Mount a tmpfs.
if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil {
- return undoBinds, errors.Wrapf(err, "error mounting tmpfs to %q in mount namespace (%q, %q)", m.Destination, target, strings.Join(m.Options, ","))
+ return undoBinds, fmt.Errorf("error mounting tmpfs to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(m.Options, ","), err)
}
logrus.Debugf("mounted a tmpfs to %q", target)
case "overlay":
// Mount an overlay.
if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil {
- return undoBinds, errors.Wrapf(err, "error mounting overlay to %q in mount namespace (%q, %q)", m.Destination, target, strings.Join(m.Options, ","))
+ return undoBinds, fmt.Errorf("error mounting overlay to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(m.Options, ","), err)
}
logrus.Debugf("mounted a overlay to %q", target)
}
if err = unix.Statfs(target, &fs); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target)
+ return undoBinds, fmt.Errorf("error checking if directory %q was bound read-only: %w", target, err)
}
if uintptr(fs.Flags)&expectedFlags != expectedFlags {
if err := unix.Mount(target, target, "bind", requestFlags|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags", target)
+ return undoBinds, fmt.Errorf("error remounting %q in mount namespace with expected flags: %w", target, err)
}
}
}
@@ -1308,7 +1308,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// No target, no problem.
continue
}
- return undoBinds, errors.Wrapf(err, "error checking %q for symlinks before marking it read-only", r)
+ return undoBinds, fmt.Errorf("error checking %q for symlinks before marking it read-only: %w", r, err)
}
// Check if the location is already read-only.
var fs unix.Statfs_t
@@ -1317,7 +1317,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// No target, no problem.
continue
}
- return undoBinds, errors.Wrapf(err, "error checking if directory %q is already read-only", target)
+ return undoBinds, fmt.Errorf("error checking if directory %q is already read-only: %w", target, err)
}
if fs.Flags&unix.ST_RDONLY != 0 {
continue
@@ -1329,23 +1329,23 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// No target, no problem.
continue
}
- return undoBinds, errors.Wrapf(err, "error bind mounting %q onto itself in preparation for making it read-only", target)
+ return undoBinds, fmt.Errorf("error bind mounting %q onto itself in preparation for making it read-only: %w", target, err)
}
// Remount the location read-only.
if err = unix.Statfs(target, &fs); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target)
+ return undoBinds, fmt.Errorf("error checking if directory %q was bound read-only: %w", target, err)
}
if fs.Flags&unix.ST_RDONLY == 0 {
if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace read-only", target)
+ return undoBinds, fmt.Errorf("error remounting %q in mount namespace read-only: %w", target, err)
}
}
// Check again.
if err = unix.Statfs(target, &fs); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking if directory %q was remounted read-only", target)
+ return undoBinds, fmt.Errorf("error checking if directory %q was remounted read-only: %w", target, err)
}
if fs.Flags&unix.ST_RDONLY == 0 {
- return undoBinds, errors.Wrapf(err, "error verifying that %q in mount namespace was remounted read-only", target)
+ return undoBinds, fmt.Errorf("error verifying that %q in mount namespace was remounted read-only: %w", target, err)
}
}
@@ -1353,7 +1353,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
roEmptyDir := filepath.Join(bundlePath, "empty")
if len(spec.Linux.MaskedPaths) > 0 {
if err := os.Mkdir(roEmptyDir, 0700); err != nil {
- return undoBinds, errors.Wrapf(err, "error creating empty directory %q", roEmptyDir)
+ return undoBinds, fmt.Errorf("error creating empty directory %q: %w", roEmptyDir, err)
}
}
@@ -1374,19 +1374,19 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// No target, no problem.
continue
}
- return undoBinds, errors.Wrapf(err, "error examining %q for masking in mount namespace", target)
+ return undoBinds, fmt.Errorf("error examining %q for masking in mount namespace: %w", target, err)
}
if targetinfo.IsDir() {
// The target's a directory. Check if it's a read-only filesystem.
var statfs unix.Statfs_t
if err = unix.Statfs(target, &statfs); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking if directory %q is a mountpoint", target)
+ return undoBinds, fmt.Errorf("error checking if directory %q is a mountpoint: %w", target, err)
}
isReadOnly := statfs.Flags&unix.MS_RDONLY != 0
// Check if any of the IDs we're mapping could read it.
var stat unix.Stat_t
if err = unix.Stat(target, &stat); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking permissions on directory %q", target)
+ return undoBinds, fmt.Errorf("error checking permissions on directory %q: %w", target, err)
}
isAccessible := false
if stat.Mode&(unix.S_IROTH|unix.S_IXOTH) != 0 {
@@ -1417,13 +1417,13 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
directory, err := os.Open(target)
if err != nil {
if !os.IsPermission(err) {
- return undoBinds, errors.Wrapf(err, "error opening directory %q", target)
+ return undoBinds, fmt.Errorf("error opening directory %q: %w", target, err)
}
} else {
names, err := directory.Readdirnames(0)
directory.Close()
if err != nil {
- return undoBinds, errors.Wrapf(err, "error reading contents of directory %q", target)
+ return undoBinds, fmt.Errorf("error reading contents of directory %q: %w", target, err)
}
hasContent = false
for _, name := range names {
@@ -1442,14 +1442,14 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
roFlags := uintptr(syscall.MS_BIND | syscall.MS_NOSUID | syscall.MS_NODEV | syscall.MS_NOEXEC | syscall.MS_RDONLY)
if !isReadOnly || (hasContent && isAccessible) {
if err = unix.Mount(roEmptyDir, target, "bind", roFlags, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error masking directory %q in mount namespace", target)
+ return undoBinds, fmt.Errorf("error masking directory %q in mount namespace: %w", target, err)
}
if err = unix.Statfs(target, &fs); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking if directory %q was mounted read-only in mount namespace", target)
+ return undoBinds, fmt.Errorf("error checking if directory %q was mounted read-only in mount namespace: %w", target, err)
}
if fs.Flags&unix.ST_RDONLY == 0 {
if err = unix.Mount(target, target, "", roFlags|syscall.MS_REMOUNT, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error making sure directory %q in mount namespace is read only", target)
+ return undoBinds, fmt.Errorf("error making sure directory %q in mount namespace is read only: %w", target, err)
}
}
}
@@ -1457,7 +1457,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// If the target is not a directory or os.DevNull, bind mount os.DevNull over it.
if !isDevNull(targetinfo) {
if err = unix.Mount(os.DevNull, target, "", uintptr(syscall.MS_BIND|syscall.MS_RDONLY|syscall.MS_PRIVATE), ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error masking non-directory %q in mount namespace", target)
+ return undoBinds, fmt.Errorf("error masking non-directory %q in mount namespace: %w", target, err)
}
}
}
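
Beyond the error-wrapping rewrite, the setCapabilities hunks above keep the capability.NewPid2(0)/Load() pairing: NewPid2 only allocates a handle for the given pid (0 meaning the calling process) and, unlike the deprecated NewPid, defers the actual capget(2) to an explicit Load(), so the two failure modes are reported separately. A hedged sketch of the pattern, assuming the vendored github.com/syndtr/gocapability/capability (the helper name is mine, not vendored code):

package chroot

import (
	"fmt"

	"github.com/syndtr/gocapability/capability"
)

// currentCapabilities is an illustrative helper, not vendored code.
func currentCapabilities() (capability.Capabilities, error) {
	// 0 selects the calling process; no kernel state is read yet.
	caps, err := capability.NewPid2(0)
	if err != nil {
		return nil, fmt.Errorf("error reading capabilities of current process: %w", err)
	}
	// Load performs the capget(2) call and can fail independently.
	if err := caps.Load(); err != nil {
		return nil, fmt.Errorf("error loading capabilities: %w", err)
	}
	return caps, nil
}
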
diff --git a/vendor/github.com/containers/buildah/chroot/seccomp.go b/vendor/github.com/containers/buildah/chroot/seccomp.go
index f36359e34..aebb1a180 100644
--- a/vendor/github.com/containers/buildah/chroot/seccomp.go
+++ b/vendor/github.com/containers/buildah/chroot/seccomp.go
@@ -4,11 +4,11 @@
package chroot
import (
+ "fmt"
"io/ioutil"
"github.com/containers/common/pkg/seccomp"
specs "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
libseccomp "github.com/seccomp/libseccomp-golang"
"github.com/sirupsen/logrus"
)
@@ -109,13 +109,13 @@ func setSeccomp(spec *specs.Spec) error {
return libseccomp.CompareInvalid
}
- filter, err := libseccomp.NewFilter(mapAction(spec.Linux.Seccomp.DefaultAction, nil))
+ filter, err := libseccomp.NewFilter(mapAction(spec.Linux.Seccomp.DefaultAction, spec.Linux.Seccomp.DefaultErrnoRet))
if err != nil {
- return errors.Wrapf(err, "error creating seccomp filter with default action %q", spec.Linux.Seccomp.DefaultAction)
+ return fmt.Errorf("error creating seccomp filter with default action %q: %w", spec.Linux.Seccomp.DefaultAction, err)
}
for _, arch := range spec.Linux.Seccomp.Architectures {
if err = filter.AddArch(mapArch(arch)); err != nil {
- return errors.Wrapf(err, "error adding architecture %q(%q) to seccomp filter", arch, mapArch(arch))
+ return fmt.Errorf("error adding architecture %q(%q) to seccomp filter: %w", arch, mapArch(arch), err)
}
}
for _, rule := range spec.Linux.Seccomp.Syscalls {
@@ -131,7 +131,7 @@ func setSeccomp(spec *specs.Spec) error {
for scnum := range scnames {
if len(rule.Args) == 0 {
if err = filter.AddRule(scnum, mapAction(rule.Action, rule.ErrnoRet)); err != nil {
- return errors.Wrapf(err, "error adding a rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action)
+ return fmt.Errorf("error adding a rule (%q:%q) to seccomp filter: %w", scnames[scnum], rule.Action, err)
}
continue
}
@@ -140,7 +140,7 @@ func setSeccomp(spec *specs.Spec) error {
for _, arg := range rule.Args {
condition, err := libseccomp.MakeCondition(arg.Index, mapOp(arg.Op), arg.Value, arg.ValueTwo)
if err != nil {
- return errors.Wrapf(err, "error building a seccomp condition %d:%v:%d:%d", arg.Index, arg.Op, arg.Value, arg.ValueTwo)
+ return fmt.Errorf("error building a seccomp condition %d:%v:%d:%d: %w", arg.Index, arg.Op, arg.Value, arg.ValueTwo, err)
}
if arg.Op != specs.OpEqualTo {
opsAreAllEquality = false
@@ -156,22 +156,22 @@ func setSeccomp(spec *specs.Spec) error {
if len(rule.Args) > 1 && opsAreAllEquality && err.Error() == "two checks on same syscall argument" {
for i := range conditions {
if err = filter.AddRuleConditional(scnum, mapAction(rule.Action, rule.ErrnoRet), conditions[i:i+1]); err != nil {
- return errors.Wrapf(err, "error adding a conditional rule (%q:%q[%d]) to seccomp filter", scnames[scnum], rule.Action, i)
+ return fmt.Errorf("error adding a conditional rule (%q:%q[%d]) to seccomp filter: %w", scnames[scnum], rule.Action, i, err)
}
}
} else {
- return errors.Wrapf(err, "error adding a conditional rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action)
+ return fmt.Errorf("error adding a conditional rule (%q:%q) to seccomp filter: %w", scnames[scnum], rule.Action, err)
}
}
}
}
if err = filter.SetNoNewPrivsBit(spec.Process.NoNewPrivileges); err != nil {
- return errors.Wrapf(err, "error setting no-new-privileges bit to %v", spec.Process.NoNewPrivileges)
+ return fmt.Errorf("error setting no-new-privileges bit to %v: %w", spec.Process.NoNewPrivileges, err)
}
err = filter.Load()
filter.Release()
if err != nil {
- return errors.Wrapf(err, "error activating seccomp filter")
+ return fmt.Errorf("error activating seccomp filter: %w", err)
}
return nil
}
@@ -183,17 +183,17 @@ func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
case "":
seccompConfig, err := seccomp.GetDefaultProfile(spec)
if err != nil {
- return errors.Wrapf(err, "loading default seccomp profile failed")
+ return fmt.Errorf("loading default seccomp profile failed: %w", err)
}
spec.Linux.Seccomp = seccompConfig
default:
seccompProfile, err := ioutil.ReadFile(seccompProfilePath)
if err != nil {
- return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath)
+ return fmt.Errorf("opening seccomp profile (%s) failed: %w", seccompProfilePath, err)
}
seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
if err != nil {
- return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath)
+ return fmt.Errorf("loading seccomp profile (%s) failed: %w", seccompProfilePath, err)
}
spec.Linux.Seccomp = seccompConfig
}
diff --git a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go
index f33dd254a..5e97ca073 100644
--- a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go
+++ b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go
@@ -1,10 +1,12 @@
+//go:build !linux || !seccomp
// +build !linux !seccomp
package chroot
import (
+ "errors"
+
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
func setSeccomp(spec *specs.Spec) error {
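
These build-constraint hunks follow one pattern across the tree: the Go 1.17 //go:build expression is added above the legacy // +build comment, and the two must agree (gofmt keeps them synchronized). The legacy syntax is easy to misread: on a // +build line a space means OR and a comma means AND, so "// +build !linux !seccomp" is exactly the "!linux || !seccomp" the new line spells out. These tags are also what the BUILDTAGS export in the spec file above feeds to go build -tags. An illustrative stub (the function body is assumed, not taken from the vendored file):

// The //go:build expression and the legacy // +build line must agree;
// on the legacy line a space is OR and a comma is AND.
//go:build !linux || !seccomp
// +build !linux !seccomp

package chroot

import (
	"errors"

	"github.com/opencontainers/runtime-spec/specs-go"
)

// setSeccomp compiles to a stub wherever seccomp support is unavailable.
func setSeccomp(spec *specs.Spec) error {
	if spec.Linux != nil && spec.Linux.Seccomp != nil {
		return errors.New("seccomp configured but seccomp support is unavailable")
	}
	return nil
}
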
diff --git a/vendor/github.com/containers/buildah/chroot/selinux.go b/vendor/github.com/containers/buildah/chroot/selinux.go
index ef96a0e7a..538c0e3f4 100644
--- a/vendor/github.com/containers/buildah/chroot/selinux.go
+++ b/vendor/github.com/containers/buildah/chroot/selinux.go
@@ -1,12 +1,14 @@
+//go:build linux
// +build linux
package chroot
import (
+ "fmt"
+
"github.com/opencontainers/runtime-spec/specs-go"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -15,7 +17,7 @@ func setSelinuxLabel(spec *specs.Spec) error {
logrus.Debugf("setting selinux label")
if spec.Process.SelinuxLabel != "" && selinux.GetEnabled() {
if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil {
- return errors.Wrapf(err, "error setting process label to %q", spec.Process.SelinuxLabel)
+ return fmt.Errorf("error setting process label to %q: %w", spec.Process.SelinuxLabel, err)
}
}
return nil
diff --git a/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go b/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go
index 41d2b86be..9c2eb2843 100644
--- a/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go
+++ b/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go
@@ -1,10 +1,12 @@
+//go:build !linux
// +build !linux
package chroot
import (
+ "errors"
+
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
func setSelinuxLabel(spec *specs.Spec) error {
diff --git a/vendor/github.com/containers/buildah/chroot/unsupported.go b/vendor/github.com/containers/buildah/chroot/unsupported.go
index 5312c0024..7c112d5e7 100644
--- a/vendor/github.com/containers/buildah/chroot/unsupported.go
+++ b/vendor/github.com/containers/buildah/chroot/unsupported.go
@@ -1,15 +1,16 @@
+//go:build !linux
// +build !linux
package chroot
import (
+ "fmt"
"io"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
// RunUsingChroot is not supported.
-func RunUsingChroot(spec *specs.Spec, bundlePath string, stdin io.Reader, stdout, stderr io.Writer) (err error) {
- return errors.Errorf("--isolation chroot is not supported on this platform")
+func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reader, stdout, stderr io.Writer) (err error) {
+ return fmt.Errorf("--isolation chroot is not supported on this platform")
}
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 6122a6696..d340ca0a2 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -3,6 +3,8 @@ package buildah
import (
"context"
"encoding/json"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"os"
@@ -25,7 +27,6 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/stringid"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -145,7 +146,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (inse
AllowedRegistries []string `json:"allowedRegistries,omitempty"`
}
if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
- return false, errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources)
+ return false, fmt.Errorf("error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %w", registrySources, err)
}
blocked := false
if len(sources.BlockedRegistries) > 0 {
@@ -156,7 +157,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (inse
}
}
if blocked {
- return false, errors.Errorf("%s registry at %q denied by policy: it is in the blocked registries list", forWhat, reference.Domain(dref))
+ return false, fmt.Errorf("%s registry at %q denied by policy: it is in the blocked registries list", forWhat, reference.Domain(dref))
}
allowed := true
if len(sources.AllowedRegistries) > 0 {
@@ -168,7 +169,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (inse
}
}
if !allowed {
- return false, errors.Errorf("%s registry at %q denied by policy: not in allowed registries list", forWhat, reference.Domain(dref))
+ return false, fmt.Errorf("%s registry at %q denied by policy: not in allowed registries list", forWhat, reference.Domain(dref))
}
if len(sources.InsecureRegistries) > 0 {
return true, nil
@@ -204,7 +205,7 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe
names, err := util.ExpandNames([]string{manifestName}, systemContext, b.store)
if err != nil {
- return "", errors.Wrapf(err, "error encountered while expanding manifest list name %q", manifestName)
+ return "", fmt.Errorf("error encountered while expanding manifest list name %q: %w", manifestName, err)
}
ref, err := util.VerifyTagName(imageSpec)
@@ -247,7 +248,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// work twice.
if options.OmitTimestamp {
if options.HistoryTimestamp != nil {
- return imgID, nil, "", errors.Errorf("OmitTimestamp ahd HistoryTimestamp can not be used together")
+ return imgID, nil, "", fmt.Errorf("OmitTimestamp ahd HistoryTimestamp can not be used together")
}
timestamp := time.Unix(0, 0).UTC()
options.HistoryTimestamp = &timestamp
@@ -257,7 +258,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
nameToRemove = stringid.GenerateRandomID() + "-tmp"
dest2, err := is.Transport.ParseStoreReference(b.store, nameToRemove)
if err != nil {
- return imgID, nil, "", errors.Wrapf(err, "error creating temporary destination reference for image")
+ return imgID, nil, "", fmt.Errorf("error creating temporary destination reference for image: %w", err)
}
dest = dest2
}
@@ -266,23 +267,23 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
blocked, err := isReferenceBlocked(dest, systemContext)
if err != nil {
- return "", nil, "", errors.Wrapf(err, "error checking if committing to registry for %q is blocked", transports.ImageName(dest))
+ return "", nil, "", fmt.Errorf("error checking if committing to registry for %q is blocked: %w", transports.ImageName(dest), err)
}
if blocked {
- return "", nil, "", errors.Errorf("commit access to registry for %q is blocked by configuration", transports.ImageName(dest))
+ return "", nil, "", fmt.Errorf("commit access to registry for %q is blocked by configuration", transports.ImageName(dest))
}
// Load the system signing policy.
commitPolicy, err := signature.DefaultPolicy(systemContext)
if err != nil {
- return "", nil, "", errors.Wrapf(err, "error obtaining default signature policy")
+ return "", nil, "", fmt.Errorf("error obtaining default signature policy: %w", err)
}
// Override the settings for local storage to make sure that we can always read the source "image".
commitPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes
policyContext, err := signature.NewPolicyContext(commitPolicy)
if err != nil {
- return imgID, nil, "", errors.Wrapf(err, "error creating new signature policy context")
+ return imgID, nil, "", fmt.Errorf("error creating new signature policy context: %w", err)
}
defer func() {
if err2 := policyContext.Destroy(); err2 != nil {
@@ -297,7 +298,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
if insecure {
if systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
- return imgID, nil, "", errors.Errorf("can't require tls verification on an insecured registry")
+ return imgID, nil, "", fmt.Errorf("can't require tls verification on an insecured registry")
}
systemContext.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
systemContext.OCIInsecureSkipTLSVerify = true
@@ -308,7 +309,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// Build an image reference from which we can copy the finished image.
src, err = b.makeContainerImageRef(options)
if err != nil {
- return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
+ return imgID, nil, "", fmt.Errorf("error computing layer digests and building metadata for container %q: %w", b.ContainerID, err)
}
// In case we're using caching, decide how to handle compression for a cache.
// If we're using blob caching, set it up for the source.
@@ -321,12 +322,12 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress)
if err != nil {
- return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory)
+ return imgID, nil, "", fmt.Errorf("error wrapping image reference %q in blob cache at %q: %w", transports.ImageName(src), options.BlobDirectory, err)
}
maybeCachedSrc = cache
cache, err = blobcache.NewBlobCache(dest, options.BlobDirectory, compress)
if err != nil {
- return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(dest), options.BlobDirectory)
+ return imgID, nil, "", fmt.Errorf("error wrapping image reference %q in blob cache at %q: %w", transports.ImageName(dest), options.BlobDirectory, err)
}
maybeCachedDest = cache
}
@@ -347,7 +348,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
var manifestBytes []byte
if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
- return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
+ return imgID, nil, "", fmt.Errorf("error copying layers and metadata for container %q: %w", b.ContainerID, err)
}
// If we've got more names to attach, and we know how to do that for
// the transport that we're writing the new image to, add them now.
@@ -356,10 +357,10 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
case is.Transport.Name():
img, err := is.Transport.GetStoreImage(b.store, dest)
if err != nil {
- return imgID, nil, "", errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
+ return imgID, nil, "", fmt.Errorf("error locating just-written image %q: %w", transports.ImageName(dest), err)
}
if err = util.AddImageNames(b.store, "", systemContext, img, options.AdditionalTags); err != nil {
- return imgID, nil, "", errors.Wrapf(err, "error setting image names to %v", append(img.Names, options.AdditionalTags...))
+ return imgID, nil, "", fmt.Errorf("error setting image names to %v: %w", append(img.Names, options.AdditionalTags...), err)
}
logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
default:
@@ -368,8 +369,8 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
img, err := is.Transport.GetStoreImage(b.store, dest)
- if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
- return imgID, nil, "", errors.Wrapf(err, "error locating image %q in local storage", transports.ImageName(dest))
+ if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
+ return imgID, nil, "", fmt.Errorf("error locating image %q in local storage: %w", transports.ImageName(dest), err)
}
if err == nil {
imgID = img.ID
@@ -381,12 +382,12 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
if len(prunedNames) < len(img.Names) {
if err = b.store.SetNames(imgID, prunedNames); err != nil {
- return imgID, nil, "", errors.Wrapf(err, "failed to prune temporary name from image %q", imgID)
+ return imgID, nil, "", fmt.Errorf("failed to prune temporary name from image %q: %w", imgID, err)
}
logrus.Debugf("reassigned names %v to image %q", prunedNames, img.ID)
dest2, err := is.Transport.ParseStoreReference(b.store, "@"+imgID)
if err != nil {
- return imgID, nil, "", errors.Wrapf(err, "error creating unnamed destination reference for image")
+ return imgID, nil, "", fmt.Errorf("error creating unnamed destination reference for image: %w", err)
}
dest = dest2
}
@@ -399,7 +400,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
- return imgID, nil, "", errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest))
+ return imgID, nil, "", fmt.Errorf("error computing digest of manifest of new image %q: %w", transports.ImageName(dest), err)
}
var ref reference.Canonical
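
The one non-mechanical hunk in commit.go is the storage.ErrImageUnknown check: errors.Cause from github.com/pkg/errors only walks chains built by that package, while the stdlib errors.Is follows anything that implements Unwrap, including errors wrapped with fmt.Errorf's %w verb. A stdlib sketch with a stand-in sentinel (names and the image reference are illustrative):

package main

import (
	"errors"
	"fmt"
)

// errImageUnknown stands in for storage.ErrImageUnknown.
var errImageUnknown = errors.New("image not known")

func getStoreImage() error {
	// Wrapped the way this commit wraps everything: "message: %w".
	return fmt.Errorf("error locating image %q: %w", "example/image:latest", errImageUnknown)
}

func main() {
	err := getStoreImage()
	fmt.Println(err == errImageUnknown)          // false: equality misses wrapped sentinels
	fmt.Println(errors.Is(err, errImageUnknown)) // true: errors.Is unwraps the %w chain
}
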
diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go
index 0b6cf4e45..aa6290186 100644
--- a/vendor/github.com/containers/buildah/config.go
+++ b/vendor/github.com/containers/buildah/config.go
@@ -3,6 +3,7 @@ package buildah
import (
"context"
"encoding/json"
+ "fmt"
"os"
"runtime"
"strings"
@@ -18,7 +19,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/stringid"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -28,7 +28,7 @@ import (
func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.Image, wantedManifestMIMEType string) error {
_, actualManifestMIMEType, err := img.Manifest(ctx)
if err != nil {
- return errors.Wrapf(err, "error getting manifest MIME type for %q", transports.ImageName(img.Reference()))
+ return fmt.Errorf("error getting manifest MIME type for %q: %w", transports.ImageName(img.Reference()), err)
}
if wantedManifestMIMEType != actualManifestMIMEType {
layerInfos := img.LayerInfos()
@@ -40,22 +40,22 @@ func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.I
LayerInfos: layerInfos,
})
if err != nil {
- return errors.Wrapf(err, "resetting recorded compression for %q", transports.ImageName(img.Reference()))
+ return fmt.Errorf("resetting recorded compression for %q: %w", transports.ImageName(img.Reference()), err)
}
secondUpdatedImg, err := updatedImg.UpdatedImage(ctx, types.ManifestUpdateOptions{
ManifestMIMEType: wantedManifestMIMEType,
})
if err != nil {
- return errors.Wrapf(err, "error converting image %q from %q to %q", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType)
+ return fmt.Errorf("error converting image %q from %q to %q: %w", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType, err)
}
img = secondUpdatedImg
}
config, err := img.ConfigBlob(ctx)
if err != nil {
- return errors.Wrapf(err, "error reading %s config from %q", wantedManifestMIMEType, transports.ImageName(img.Reference()))
+ return fmt.Errorf("error reading %s config from %q: %w", wantedManifestMIMEType, transports.ImageName(img.Reference()), err)
}
if err := json.Unmarshal(config, dest); err != nil {
- return errors.Wrapf(err, "error parsing %s configuration %q from %q", wantedManifestMIMEType, string(config), transports.ImageName(img.Reference()))
+ return fmt.Errorf("error parsing %s configuration %q from %q: %w", wantedManifestMIMEType, string(config), transports.ImageName(img.Reference()), err)
}
return nil
}
@@ -64,11 +64,11 @@ func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.Sy
if img != nil { // A pre-existing image, as opposed to a "FROM scratch" new one.
rawManifest, manifestMIMEType, err := img.Manifest(ctx)
if err != nil {
- return errors.Wrapf(err, "error reading image manifest for %q", transports.ImageName(img.Reference()))
+ return fmt.Errorf("error reading image manifest for %q: %w", transports.ImageName(img.Reference()), err)
}
rawConfig, err := img.ConfigBlob(ctx)
if err != nil {
- return errors.Wrapf(err, "error reading image configuration for %q", transports.ImageName(img.Reference()))
+ return fmt.Errorf("error reading image configuration for %q: %w", transports.ImageName(img.Reference()), err)
}
b.Manifest = rawManifest
b.Config = rawConfig
@@ -89,7 +89,7 @@ func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.Sy
// Attempt to recover format-specific data from the manifest.
v1Manifest := ociv1.Manifest{}
if err := json.Unmarshal(b.Manifest, &v1Manifest); err != nil {
- return errors.Wrapf(err, "error parsing OCI manifest %q", string(b.Manifest))
+ return fmt.Errorf("error parsing OCI manifest %q: %w", string(b.Manifest), err)
}
for k, v := range v1Manifest.Annotations {
// NOTE: do not override annotations that are
diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go
index 23bf0fb45..3c7b021e1 100644
--- a/vendor/github.com/containers/buildah/copier/copier.go
+++ b/vendor/github.com/containers/buildah/copier/copier.go
@@ -4,7 +4,7 @@ import (
"archive/tar"
"bytes"
"encoding/json"
- stderrors "errors"
+ "errors"
"fmt"
"io"
"io/fs"
@@ -25,7 +25,6 @@ import (
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -454,17 +453,17 @@ func cleanerReldirectory(candidate string) string {
// the two directories are on different volumes
func convertToRelSubdirectory(root, directory string) (relative string, err error) {
if root == "" || !filepath.IsAbs(root) {
- return "", errors.Errorf("expected root directory to be an absolute path, got %q", root)
+ return "", fmt.Errorf("expected root directory to be an absolute path, got %q", root)
}
if directory == "" || !filepath.IsAbs(directory) {
- return "", errors.Errorf("expected directory to be an absolute path, got %q", root)
+ return "", fmt.Errorf("expected directory to be an absolute path, got %q", root)
}
if filepath.VolumeName(root) != filepath.VolumeName(directory) {
- return "", errors.Errorf("%q and %q are on different volumes", root, directory)
+ return "", fmt.Errorf("%q and %q are on different volumes", root, directory)
}
rel, err := filepath.Rel(root, directory)
if err != nil {
- return "", errors.Wrapf(err, "error computing path of %q relative to %q", directory, root)
+ return "", fmt.Errorf("error computing path of %q relative to %q: %w", directory, root, err)
}
return cleanerReldirectory(rel), nil
}
@@ -472,7 +471,7 @@ func convertToRelSubdirectory(root, directory string) (relative string, err erro
func currentVolumeRoot() (string, error) {
cwd, err := os.Getwd()
if err != nil {
- return "", errors.Wrapf(err, "error getting current working directory")
+ return "", fmt.Errorf("error getting current working directory: %w", err)
}
return filepath.VolumeName(cwd) + string(os.PathSeparator), nil
}
@@ -480,7 +479,7 @@ func currentVolumeRoot() (string, error) {
func isVolumeRoot(candidate string) (bool, error) {
abs, err := filepath.Abs(candidate)
if err != nil {
- return false, errors.Wrapf(err, "error converting %q to an absolute path", candidate)
+ return false, fmt.Errorf("error converting %q to an absolute path: %w", candidate, err)
}
return abs == filepath.VolumeName(abs)+string(os.PathSeparator), nil
}
@@ -494,7 +493,7 @@ func copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response,
if req.Root == "" {
wd, err := os.Getwd()
if err != nil {
- return nil, errors.Wrapf(err, "error getting current working directory")
+ return nil, fmt.Errorf("error getting current working directory: %w", err)
}
req.Directory = wd
} else {
@@ -504,19 +503,19 @@ func copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response,
if req.Root == "" {
root, err := currentVolumeRoot()
if err != nil {
- return nil, errors.Wrapf(err, "error determining root of current volume")
+ return nil, fmt.Errorf("error determining root of current volume: %w", err)
}
req.Root = root
}
if filepath.IsAbs(req.Directory) {
_, err := convertToRelSubdirectory(req.Root, req.Directory)
if err != nil {
- return nil, errors.Wrapf(err, "error rewriting %q to be relative to %q", req.Directory, req.Root)
+ return nil, fmt.Errorf("error rewriting %q to be relative to %q: %w", req.Directory, req.Root, err)
}
}
isAlreadyRoot, err := isVolumeRoot(req.Root)
if err != nil {
- return nil, errors.Wrapf(err, "error checking if %q is a root directory", req.Root)
+ return nil, fmt.Errorf("error checking if %q is a root directory: %w", req.Root, err)
}
if !isAlreadyRoot && canChroot {
return copierWithSubprocess(bulkReader, bulkWriter, req)
@@ -578,27 +577,27 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
cmd := reexec.Command(copierCommand)
stdinRead, stdinWrite, err := os.Pipe()
if err != nil {
- return nil, errors.Wrapf(err, "pipe")
+ return nil, fmt.Errorf("pipe: %w", err)
}
defer closeIfNotNilYet(&stdinRead, "stdin pipe reader")
defer closeIfNotNilYet(&stdinWrite, "stdin pipe writer")
encoder := json.NewEncoder(stdinWrite)
stdoutRead, stdoutWrite, err := os.Pipe()
if err != nil {
- return nil, errors.Wrapf(err, "pipe")
+ return nil, fmt.Errorf("pipe: %w", err)
}
defer closeIfNotNilYet(&stdoutRead, "stdout pipe reader")
defer closeIfNotNilYet(&stdoutWrite, "stdout pipe writer")
decoder := json.NewDecoder(stdoutRead)
bulkReaderRead, bulkReaderWrite, err := os.Pipe()
if err != nil {
- return nil, errors.Wrapf(err, "pipe")
+ return nil, fmt.Errorf("pipe: %w", err)
}
defer closeIfNotNilYet(&bulkReaderRead, "child bulk content reader pipe, read end")
defer closeIfNotNilYet(&bulkReaderWrite, "child bulk content reader pipe, write end")
bulkWriterRead, bulkWriterWrite, err := os.Pipe()
if err != nil {
- return nil, errors.Wrapf(err, "pipe")
+ return nil, fmt.Errorf("pipe: %w", err)
}
defer closeIfNotNilYet(&bulkWriterRead, "child bulk content writer pipe, read end")
defer closeIfNotNilYet(&bulkWriterWrite, "child bulk content writer pipe, write end")
@@ -611,7 +610,7 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
cmd.Stderr = &errorBuffer
cmd.ExtraFiles = []*os.File{bulkReaderRead, bulkWriterWrite}
if err = cmd.Start(); err != nil {
- return nil, errors.Wrapf(err, "error starting subprocess")
+ return nil, fmt.Errorf("error starting subprocess: %w", err)
}
cmdToWaitFor := cmd
defer func() {
@@ -633,9 +632,9 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
bulkWriterWrite = nil
killAndReturn := func(err error, step string) (*response, error) { // nolint: unparam
if err2 := cmd.Process.Kill(); err2 != nil {
- return nil, errors.Wrapf(err, "error killing subprocess: %v; %s", err2, step)
+ return nil, fmt.Errorf("error killing subprocess: %v; %s: %w", err2, step, err)
}
- return nil, errors.Wrap(err, step)
+ return nil, fmt.Errorf("%v: %w", step, err)
}
if err = encoder.Encode(req); err != nil {
return killAndReturn(err, "error encoding request for copier subprocess")
@@ -691,10 +690,10 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
}
}
if readError != nil {
- return nil, errors.Wrapf(readError, "error passing bulk input to subprocess")
+ return nil, fmt.Errorf("error passing bulk input to subprocess: %w", readError)
}
if writeError != nil {
- return nil, errors.Wrapf(writeError, "error passing bulk output from subprocess")
+ return nil, fmt.Errorf("error passing bulk output from subprocess: %w", writeError)
}
return resp, nil
}
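An editorial aside on the machinery above: copierWithSubprocess leans on containers/storage's reexec package to re-invoke the current binary as a chroot-capable child, speaking JSON over the pipes it has just wired up. A minimal, hedged sketch of that re-exec pattern — the command name and handler here are illustrative, not buildah's own:

package main

import (
    "fmt"
    "os"

    "github.com/containers/storage/pkg/reexec"
)

const demoCommand = "copier-demo" // hypothetical name; buildah registers its own

func init() {
    // When the binary is re-executed under this name, the child runs
    // the registered handler instead of main.
    reexec.Register(demoCommand, func() {
        fmt.Println("hello from the helper process")
        os.Exit(0)
    })
}

func main() {
    if reexec.Init() { // true in the child, which has already run its handler
        return
    }
    cmd := reexec.Command(demoCommand)
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    if err := cmd.Run(); err != nil {
        fmt.Fprintf(os.Stderr, "error starting subprocess: %v\n", err)
    }
}

In buildah's case the child additionally chroots into req.Root, which is why the parent-side handling above is so careful to kill the child on failure while preserving the root cause with %w.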
@@ -846,7 +845,7 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
excludes := req.Excludes()
pm, err := fileutils.NewPatternMatcher(excludes)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error processing excludes list %v", excludes)
+ return nil, nil, fmt.Errorf("error processing excludes list %v: %w", excludes, err)
}
var idMappings *idtools.IDMappings
@@ -857,7 +856,7 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
switch req.Request {
default:
- return nil, nil, errors.Errorf("not an implemented request type: %q", req.Request)
+ return nil, nil, fmt.Errorf("not an implemented request type: %q", req.Request)
case requestEval:
resp := copierHandlerEval(req)
return resp, nil, nil
@@ -884,7 +883,7 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bool, error) {
rel, err := convertToRelSubdirectory(root, path)
if err != nil {
- return "", false, errors.Wrapf(err, "copier: error computing path of %q relative to root %q", path, root)
+ return "", false, fmt.Errorf("copier: error computing path of %q relative to root %q: %w", path, root, err)
}
if pm == nil {
return rel, false, nil
@@ -898,7 +897,7 @@ func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bo
// it expects Unix-style paths.
matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
if err != nil {
- return rel, false, errors.Wrapf(err, "copier: error checking if %q is excluded", rel)
+ return rel, false, fmt.Errorf("copier: error checking if %q is excluded: %w", rel, err)
}
if matches {
return rel, true, nil
@@ -916,7 +915,7 @@ func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bo
func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.PatternMatcher) (string, error) {
rel, err := convertToRelSubdirectory(root, path)
if err != nil {
- return "", errors.Errorf("error making path %q relative to %q", path, root)
+ return "", fmt.Errorf("error making path %q relative to %q", path, root)
}
workingPath := root
followed := 0
@@ -953,7 +952,7 @@ func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.P
// resolve the remaining components
rel, err := convertToRelSubdirectory(root, filepath.Join(workingPath, target))
if err != nil {
- return "", errors.Errorf("error making path %q relative to %q", filepath.Join(workingPath, target), root)
+ return "", fmt.Errorf("error making path %q relative to %q", filepath.Join(workingPath, target), root)
}
workingPath = root
components = append(strings.Split(filepath.Clean(string(os.PathSeparator)+rel), string(os.PathSeparator)), components[1:]...)
@@ -1101,11 +1100,10 @@ func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
}
func errorIsPermission(err error) bool {
- err = errors.Cause(err)
if err == nil {
return false
}
- return os.IsPermission(err) || strings.Contains(err.Error(), "permission denied")
+ return errors.Is(err, os.ErrPermission) || strings.Contains(err.Error(), "permission denied")
}
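This hunk is the behavioral heart of the migration: os.IsPermission predates error wrapping and only inspects the outermost error, so the old code had to pre-unwrap with errors.Cause, whereas errors.Is walks the %w chain itself. A self-contained illustration:

package main

import (
    "errors"
    "fmt"
    "os"
)

func main() {
    wrapped := fmt.Errorf("copier: get: lstat %q: %w", "/etc/shadow", os.ErrPermission)

    // os.IsPermission does not unwrap, so it misses the wrapped sentinel.
    fmt.Println(os.IsPermission(wrapped)) // false
    // errors.Is follows the %w chain and finds it.
    fmt.Println(errors.Is(wrapped, os.ErrPermission)) // true
}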
func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) {
@@ -1154,7 +1152,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
// if the named thing-to-read is a symlink, dereference it
info, err := os.Lstat(item)
if err != nil {
- return errors.Wrapf(err, "copier: get: lstat %q", item)
+ return fmt.Errorf("copier: get: lstat %q: %w", item, err)
}
// chase links. if we hit a dead end, we should just fail
followedLinks := 0
@@ -1171,15 +1169,15 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
}
item = path
if _, err = convertToRelSubdirectory(req.Root, item); err != nil {
- return errors.Wrapf(err, "copier: get: computing path of %q(%q) relative to %q", queue[i], item, req.Root)
+ return fmt.Errorf("copier: get: computing path of %q(%q) relative to %q: %w", queue[i], item, req.Root, err)
}
if info, err = os.Lstat(item); err != nil {
- return errors.Wrapf(err, "copier: get: lstat %q(%q)", queue[i], item)
+ return fmt.Errorf("copier: get: lstat %q(%q): %w", queue[i], item, err)
}
followedLinks++
}
if followedLinks >= maxFollowedLinks {
- return errors.Wrapf(syscall.ELOOP, "copier: get: resolving symlink %q(%q)", queue[i], item)
+ return fmt.Errorf("copier: get: resolving symlink %q(%q): %w", queue[i], item, syscall.ELOOP)
}
// evaluate excludes relative to the root directory
if info.Mode().IsDir() {
@@ -1193,11 +1191,11 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
return filepath.SkipDir
}
return nil
- } else if os.IsNotExist(errors.Cause(err)) {
+ } else if errors.Is(err, os.ErrNotExist) {
logrus.Warningf("copier: file disappeared while reading: %q", path)
return nil
}
- return errors.Wrapf(err, "copier: get: error reading %q", path)
+ return fmt.Errorf("copier: get: error reading %q: %w", path, err)
}
if d.Type() == os.ModeSocket {
logrus.Warningf("copier: skipping socket %q", d.Name())
@@ -1208,7 +1206,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
// for the tar header
rel, relErr := convertToRelSubdirectory(item, path)
if relErr != nil {
- return errors.Wrapf(relErr, "copier: get: error computing path of %q relative to top directory %q", path, item)
+ return fmt.Errorf("copier: get: error computing path of %q relative to top directory %q: %w", path, item, relErr)
}
// prefix the original item's name if we're keeping it
if relNamePrefix != "" {
@@ -1264,7 +1262,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
if d.Type() == os.ModeSymlink {
target, err := os.Readlink(path)
if err != nil {
- return errors.Wrapf(err, "copier: get: readlink(%q(%q))", rel, path)
+ return fmt.Errorf("copier: get: readlink(%q(%q)): %w", rel, path, err)
}
symlinkTarget = target
}
@@ -1284,7 +1282,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
if err := copierHandlerGetOne(info, symlinkTarget, rel, path, options, tw, hardlinkChecker, idMappings); err != nil {
if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
return ok
- } else if os.IsNotExist(errors.Cause(err)) {
+ } else if errors.Is(err, os.ErrNotExist) {
logrus.Warningf("copier: file disappeared while reading: %q", path)
return nil
}
@@ -1294,7 +1292,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
}
// walk the directory tree, checking/adding items individually
if err := filepath.WalkDir(item, walkfn); err != nil {
- return errors.Wrapf(err, "copier: get: %q(%q)", queue[i], item)
+ return fmt.Errorf("copier: get: %q(%q): %w", queue[i], item, err)
}
itemsCopied++
} else {
@@ -1313,13 +1311,13 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
continue
}
- return errors.Wrapf(err, "copier: get: %q", queue[i])
+ return fmt.Errorf("copier: get: %q: %w", queue[i], err)
}
itemsCopied++
}
}
if itemsCopied == 0 {
- return errors.Wrapf(syscall.ENOENT, "copier: get: copied no items")
+ return fmt.Errorf("copier: get: copied no items: %w", syscall.ENOENT)
}
return nil
}
@@ -1359,7 +1357,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
// build the header using the name provided
hdr, err := tar.FileInfoHeader(srcfi, symlinkTarget)
if err != nil {
- return errors.Wrapf(err, "error generating tar header for %s (%s)", contentPath, symlinkTarget)
+ return fmt.Errorf("error generating tar header for %s (%s): %w", contentPath, symlinkTarget, err)
}
if name != "" {
hdr.Name = filepath.ToSlash(name)
@@ -1381,7 +1379,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
if !options.StripXattrs {
xattrs, err = Lgetxattrs(contentPath)
if err != nil {
- return errors.Wrapf(err, "error getting extended attributes for %q", contentPath)
+ return fmt.Errorf("error getting extended attributes for %q: %w", contentPath, err)
}
}
hdr.Xattrs = xattrs // nolint:staticcheck
@@ -1393,12 +1391,12 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
if options.ExpandArchives && isArchivePath(contentPath) {
f, err := os.Open(contentPath)
if err != nil {
- return errors.Wrapf(err, "error opening file for reading archive contents")
+ return fmt.Errorf("error opening file for reading archive contents: %w", err)
}
defer f.Close()
rc, _, err := compression.AutoDecompress(f)
if err != nil {
- return errors.Wrapf(err, "error decompressing %s", contentPath)
+ return fmt.Errorf("error decompressing %s: %w", contentPath, err)
}
defer rc.Close()
tr := tar.NewReader(rc)
@@ -1408,22 +1406,22 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
hdr.Name = handleRename(options.Rename, hdr.Name)
}
if err = tw.WriteHeader(hdr); err != nil {
- return errors.Wrapf(err, "error writing tar header from %q to pipe", contentPath)
+ return fmt.Errorf("error writing tar header from %q to pipe: %w", contentPath, err)
}
if hdr.Size != 0 {
n, err := io.Copy(tw, tr)
if err != nil {
- return errors.Wrapf(err, "error extracting content from archive %s: %s", contentPath, hdr.Name)
+ return fmt.Errorf("error extracting content from archive %s: %s: %w", contentPath, hdr.Name, err)
}
if n != hdr.Size {
- return errors.Errorf("error extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name)
+ return fmt.Errorf("error extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name)
}
tw.Flush()
}
hdr, err = tr.Next()
}
if err != io.EOF {
- return errors.Wrapf(err, "error extracting contents of archive %s", contentPath)
+ return fmt.Errorf("error extracting contents of archive %s: %w", contentPath, err)
}
return nil
}
@@ -1445,7 +1443,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
hostPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
hdr.Uid, hdr.Gid, err = idMappings.ToContainer(hostPair)
if err != nil {
- return errors.Wrapf(err, "error mapping host filesystem owners %#v to container filesystem owners", hostPair)
+ return fmt.Errorf("error mapping host filesystem owners %#v to container filesystem owners: %w", hostPair, err)
}
}
// force ownership and/or permissions, if requested
@@ -1469,29 +1467,29 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
// open the file first so that we don't write a header for it if we can't actually read it
f, err = os.Open(contentPath)
if err != nil {
- return errors.Wrapf(err, "error opening file for adding its contents to archive")
+ return fmt.Errorf("error opening file for adding its contents to archive: %w", err)
}
defer f.Close()
} else if hdr.Typeflag == tar.TypeDir {
// open the directory file first to make sure we can access it.
f, err = os.Open(contentPath)
if err != nil {
- return errors.Wrapf(err, "error opening directory for adding its contents to archive")
+ return fmt.Errorf("error opening directory for adding its contents to archive: %w", err)
}
defer f.Close()
}
// output the header
if err = tw.WriteHeader(hdr); err != nil {
- return errors.Wrapf(err, "error writing header for %s (%s)", contentPath, hdr.Name)
+ return fmt.Errorf("error writing header for %s (%s): %w", contentPath, hdr.Name, err)
}
if hdr.Typeflag == tar.TypeReg {
// output the content
n, err := io.Copy(tw, f)
if err != nil {
- return errors.Wrapf(err, "error copying %s", contentPath)
+ return fmt.Errorf("error copying %s: %w", contentPath, err)
}
if n != hdr.Size {
- return errors.Errorf("error copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, n, hdr.Size)
+ return fmt.Errorf("error copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, n, hdr.Size)
}
tw.Flush()
}
@@ -1542,7 +1540,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
ensureDirectoryUnderRoot := func(directory string) error {
rel, err := convertToRelSubdirectory(req.Root, directory)
if err != nil {
- return errors.Wrapf(err, "%q is not a subdirectory of %q", directory, req.Root)
+ return fmt.Errorf("%q is not a subdirectory of %q: %w", directory, req.Root, err)
}
subdir := ""
for _, component := range strings.Split(rel, string(os.PathSeparator)) {
@@ -1550,7 +1548,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
path := filepath.Join(req.Root, subdir)
if err := os.Mkdir(path, 0700); err == nil {
if err = lchown(path, defaultDirUID, defaultDirGID); err != nil {
- return errors.Wrapf(err, "copier: put: error setting owner of %q to %d:%d", path, defaultDirUID, defaultDirGID)
+ return fmt.Errorf("copier: put: error setting owner of %q to %d:%d: %w", path, defaultDirUID, defaultDirGID, err)
}
// make a conditional note to set this directory's permissions
// later, but not if we already had an explicitly-provided mode
@@ -1558,8 +1556,10 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
directoryModes[path] = defaultDirMode
}
} else {
- if !os.IsExist(err) {
- return errors.Wrapf(err, "copier: put: error checking directory %q", path)
+ // FreeBSD can return EISDIR for "mkdir /":
+ // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739.
+ if !os.IsExist(err) && !errors.Is(err, syscall.EISDIR) {
+ return fmt.Errorf("copier: put: error checking directory %q: %w", path, err)
}
}
}
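The same EISDIR allowance appears again in copierHandlerMkdir further down. A compact sketch of the shared idea — tolerant directory creation that treats "already there" (and FreeBSD's EISDIR for mkdir("/")) as success:

package main

import (
    "errors"
    "fmt"
    "os"
    "syscall"
)

// ensureDir mirrors the tolerant mkdir above: EEXIST means the directory
// is already present, and EISDIR is accepted because FreeBSD returns it
// for mkdir("/").
func ensureDir(path string) error {
    err := os.Mkdir(path, 0o700)
    if err == nil || os.IsExist(err) || errors.Is(err, syscall.EISDIR) {
        return nil
    }
    return fmt.Errorf("copier: put: error checking directory %q: %w", path, err)
}

func main() {
    fmt.Println(ensureDir("/")) // <nil> on both Linux (EEXIST) and FreeBSD (EISDIR)
}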
@@ -1568,14 +1568,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
makeDirectoryWriteable := func(directory string) error {
st, err := os.Lstat(directory)
if err != nil {
- return errors.Wrapf(err, "copier: put: error reading permissions of directory %q", directory)
+ return fmt.Errorf("copier: put: error reading permissions of directory %q: %w", directory, err)
}
mode := st.Mode() & os.ModePerm
if _, ok := directoryModes[directory]; !ok {
directoryModes[directory] = mode
}
if err = os.Chmod(directory, 0o700); err != nil {
- return errors.Wrapf(err, "copier: put: error making directory %q writable", directory)
+ return fmt.Errorf("copier: put: error making directory %q writable: %w", directory, err)
}
return nil
}
@@ -1584,7 +1584,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
if err != nil && os.IsExist(err) {
if req.PutOptions.NoOverwriteDirNonDir {
if st, err2 := os.Lstat(path); err2 == nil && st.IsDir() {
- return 0, errors.Wrapf(err, "copier: put: error creating file at %q", path)
+ return 0, fmt.Errorf("copier: put: error creating file at %q: %w", path, err)
}
}
if err = os.RemoveAll(path); err != nil {
@@ -1595,7 +1595,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
err = os.RemoveAll(path)
}
if err != nil {
- return 0, errors.Wrapf(err, "copier: put: error removing item to be overwritten %q", path)
+ return 0, fmt.Errorf("copier: put: error removing item to be overwritten %q: %w", path, err)
}
}
f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
@@ -1607,12 +1607,12 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
}
if err != nil {
- return 0, errors.Wrapf(err, "copier: put: error opening file %q for writing", path)
+ return 0, fmt.Errorf("copier: put: error opening file %q for writing: %w", path, err)
}
defer f.Close()
n, err := io.Copy(f, tr)
if err != nil {
- return n, errors.Wrapf(err, "copier: put: error writing file %q", path)
+ return n, fmt.Errorf("copier: put: error writing file %q: %w", path, err)
}
return n, nil
}
@@ -1671,7 +1671,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
containerPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
hostPair, err := idMappings.ToHost(containerPair)
if err != nil {
- return errors.Wrapf(err, "error mapping container filesystem owner 0,0 to host filesystem owners")
+ return fmt.Errorf("error mapping container filesystem owner 0,0 to host filesystem owners: %w", err)
}
hdr.Uid, hdr.Gid = hostPair.UID, hostPair.GID
}
@@ -1716,14 +1716,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
switch hdr.Typeflag {
// no type flag for sockets
default:
- return errors.Errorf("unrecognized Typeflag %c", hdr.Typeflag)
+ return fmt.Errorf("unrecognized Typeflag %c", hdr.Typeflag)
case tar.TypeReg, tar.TypeRegA:
var written int64
written, err = createFile(path, tr)
// only check the length if there wasn't an error, which we'll
// check along with errors for other types of entries
if err == nil && written != hdr.Size {
- return errors.Errorf("copier: put: error creating regular file %q: incorrect length (%d != %d)", path, written, hdr.Size)
+ return fmt.Errorf("copier: put: error creating regular file %q: incorrect length (%d != %d)", path, written, hdr.Size)
}
case tar.TypeLink:
var linkTarget string
@@ -1736,7 +1736,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
hdr.Linkname = handleRename(req.PutOptions.Rename, hdr.Linkname)
}
if linkTarget, err = resolvePath(targetDirectory, filepath.Join(req.Root, filepath.FromSlash(hdr.Linkname)), true, nil); err != nil {
- return errors.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root)
+ return fmt.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root)
}
if err = os.Link(linkTarget, path); err != nil && os.IsExist(err) {
if req.PutOptions.NoOverwriteDirNonDir {
@@ -1841,11 +1841,11 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
}
// check for errors
if err != nil {
- return errors.Wrapf(err, "copier: put: error creating %q", path)
+ return fmt.Errorf("copier: put: error creating %q: %w", path, err)
}
// set ownership
if err = lchown(path, hdr.Uid, hdr.Gid); err != nil {
- return errors.Wrapf(err, "copier: put: error setting ownership of %q to %d:%d", path, hdr.Uid, hdr.Gid)
+ return fmt.Errorf("copier: put: error setting ownership of %q to %d:%d: %w", path, hdr.Uid, hdr.Gid, err)
}
// set permissions, except for symlinks, since we don't
// have an lchmod, and directories, which we'll fix up
@@ -1854,7 +1854,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
// write to, but which we'll need to create content in
if hdr.Typeflag != tar.TypeSymlink && hdr.Typeflag != tar.TypeDir {
if err = os.Chmod(path, mode); err != nil {
- return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, mode)
+ return fmt.Errorf("copier: put: error setting permissions on %q to 0%o: %w", path, mode, err)
}
}
// set other bits that might have been reset by chown()
@@ -1869,14 +1869,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
mode |= syscall.S_ISVTX
}
if err = syscall.Chmod(path, uint32(mode)); err != nil {
- return errors.Wrapf(err, "error setting additional permissions on %q to 0%o", path, mode)
+ return fmt.Errorf("error setting additional permissions on %q to 0%o: %w", path, mode, err)
}
}
// set xattrs, including some that might have been reset by chown()
if !req.PutOptions.StripXattrs {
if err = Lsetxattrs(path, hdr.Xattrs); err != nil { // nolint:staticcheck
if !req.PutOptions.IgnoreXattrErrors {
- return errors.Wrapf(err, "copier: put: error setting extended attributes on %q", path)
+ return fmt.Errorf("copier: put: error setting extended attributes on %q: %w", path, err)
}
}
}
@@ -1885,13 +1885,13 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
hdr.AccessTime = hdr.ModTime
}
if err = lutimes(hdr.Typeflag == tar.TypeSymlink, path, hdr.AccessTime, hdr.ModTime); err != nil {
- return errors.Wrapf(err, "error setting access and modify timestamps on %q to %s and %s", path, hdr.AccessTime, hdr.ModTime)
+ return fmt.Errorf("error setting access and modify timestamps on %q to %s and %s: %w", path, hdr.AccessTime, hdr.ModTime, err)
}
nextHeader:
hdr, err = tr.Next()
}
if err != io.EOF {
- return errors.Wrapf(err, "error reading tar stream: expected EOF")
+ return fmt.Errorf("error reading tar stream: expected EOF: %w", err)
}
return nil
}
@@ -1941,7 +1941,9 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response,
return errorResponse("copier: mkdir: error setting permissions on %q to 0%o: %v", path, dirMode, err)
}
} else {
- if !os.IsExist(err) {
+ // FreeBSD can return EISDIR for "mkdir /":
+ // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739.
+ if !os.IsExist(err) && !errors.Is(err, syscall.EISDIR) {
return errorResponse("copier: mkdir: error checking directory %q: %v", path, err)
}
}
@@ -1968,15 +1970,3 @@ func copierHandlerRemove(req request) *response {
}
return &response{Error: "", Remove: removeResponse{}}
}
-
-func unwrapError(err error) error {
- e := errors.Cause(err)
- for e != nil {
- err = e
- e = stderrors.Unwrap(err)
- if e == err {
- break
- }
- }
- return err
-}
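The deleted unwrapError helper peeled a pkg/errors chain down to its root cause. With %w wrapping, the same walk can be done with the standard library alone; a rough equivalent, shown here only to document what the migration replaces (util.Cause, used in xattrs.go below, presumably plays this role now):

package main

import (
    "errors"
    "fmt"
    "syscall"
)

// cause walks a %w chain to its innermost error, like the removed helper.
func cause(err error) error {
    for err != nil {
        next := errors.Unwrap(err)
        if next == nil {
            break
        }
        err = next
    }
    return err
}

func main() {
    err := fmt.Errorf("outer: %w", fmt.Errorf("inner: %w", syscall.ERANGE))
    fmt.Println(cause(err) == syscall.ERANGE)   // true
    fmt.Println(errors.Is(err, syscall.ERANGE)) // true, and usually the better test
}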
diff --git a/vendor/github.com/containers/buildah/copier/mknod_int.go b/vendor/github.com/containers/buildah/copier/mknod_int.go
new file mode 100644
index 000000000..b9e9f6fef
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/mknod_int.go
@@ -0,0 +1,12 @@
+//go:build !windows && !freebsd
+// +build !windows,!freebsd
+
+package copier
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func mknod(path string, mode uint32, dev int) error {
+ return unix.Mknod(path, mode, dev)
+}
diff --git a/vendor/github.com/containers/buildah/copier/mknod_uint64.go b/vendor/github.com/containers/buildah/copier/mknod_uint64.go
new file mode 100644
index 000000000..ccddf36fb
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/mknod_uint64.go
@@ -0,0 +1,12 @@
+//go:build freebsd
+// +build freebsd
+
+package copier
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func mknod(path string, mode uint32, dev int) error {
+ return unix.Mknod(path, mode, uint64(dev))
+}
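These two new files exist because golang.org/x/sys/unix declares Mknod with dev int on Linux but dev uint64 on FreeBSD; the build tags hide that difference behind one signature, so call sites need no conditional compilation. A hedged sketch of a caller (makeCharDevice is hypothetical, not a buildah function):

//go:build !windows
// +build !windows

package copier

import "golang.org/x/sys/unix"

// makeCharDevice compiles unchanged on Linux and FreeBSD: unix.Mkdev
// returns uint64 everywhere, and the build-tagged mknod wrappers absorb
// the per-platform dev argument type.
func makeCharDevice(path string, major, minor uint32) error {
    return mknod(path, unix.S_IFCHR|0o600, int(unix.Mkdev(major, minor)))
}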
diff --git a/vendor/github.com/containers/buildah/copier/syscall_unix.go b/vendor/github.com/containers/buildah/copier/syscall_unix.go
index 9fc8fece3..0f2de9354 100644
--- a/vendor/github.com/containers/buildah/copier/syscall_unix.go
+++ b/vendor/github.com/containers/buildah/copier/syscall_unix.go
@@ -1,13 +1,14 @@
+//go:build !windows
// +build !windows
package copier
import (
+ "fmt"
"os"
"syscall"
"time"
- "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@@ -16,13 +17,13 @@ var canChroot = os.Getuid() == 0
func chroot(root string) (bool, error) {
if canChroot {
if err := os.Chdir(root); err != nil {
- return false, errors.Wrapf(err, "error changing to intended-new-root directory %q", root)
+ return false, fmt.Errorf("error changing to intended-new-root directory %q: %w", root, err)
}
if err := unix.Chroot(root); err != nil {
- return false, errors.Wrapf(err, "error chrooting to directory %q", root)
+ return false, fmt.Errorf("error chrooting to directory %q: %w", root, err)
}
if err := os.Chdir(string(os.PathSeparator)); err != nil {
- return false, errors.Wrapf(err, "error changing to just-became-root directory %q", root)
+ return false, fmt.Errorf("error changing to just-became-root directory %q: %w", root, err)
}
return true, nil
}
@@ -45,10 +46,6 @@ func mkfifo(path string, mode uint32) error {
return unix.Mkfifo(path, mode)
}
-func mknod(path string, mode uint32, dev int) error {
- return unix.Mknod(path, mode, dev)
-}
-
func chmod(path string, mode os.FileMode) error {
return os.Chmod(path, mode)
}
diff --git a/vendor/github.com/containers/buildah/copier/xattrs.go b/vendor/github.com/containers/buildah/copier/xattrs.go
index aef0f4403..15db6418d 100644
--- a/vendor/github.com/containers/buildah/copier/xattrs.go
+++ b/vendor/github.com/containers/buildah/copier/xattrs.go
@@ -1,13 +1,15 @@
+//go:build linux || netbsd || freebsd || darwin
// +build linux netbsd freebsd darwin
package copier
import (
+ "fmt"
"path/filepath"
"strings"
"syscall"
- "github.com/pkg/errors"
+ "github.com/containers/buildah/util"
"golang.org/x/sys/unix"
)
@@ -43,22 +45,22 @@ func Lgetxattrs(path string) (map[string]string, error) {
list = make([]byte, listSize)
size, err := unix.Llistxattr(path, list)
if err != nil {
- if unwrapError(err) == syscall.ERANGE {
+ if util.Cause(err) == syscall.ERANGE {
listSize *= 2
continue
}
- if (unwrapError(err) == syscall.ENOTSUP) || (unwrapError(err) == syscall.ENOSYS) {
+ if (util.Cause(err) == syscall.ENOTSUP) || (util.Cause(err) == syscall.ENOSYS) {
// treat these errors listing xattrs as equivalent to "no xattrs"
list = list[:0]
break
}
- return nil, errors.Wrapf(err, "error listing extended attributes of %q", path)
+ return nil, fmt.Errorf("error listing extended attributes of %q: %w", path, err)
}
list = list[:size]
break
}
if listSize >= maxSize {
- return nil, errors.Errorf("unable to read list of attributes for %q: size would have been too big", path)
+ return nil, fmt.Errorf("unable to read list of attributes for %q: size would have been too big", path)
}
m := make(map[string]string)
for _, attribute := range strings.Split(string(list), string('\000')) {
@@ -69,17 +71,17 @@ func Lgetxattrs(path string) (map[string]string, error) {
attributeValue = make([]byte, attributeSize)
size, err := unix.Lgetxattr(path, attribute, attributeValue)
if err != nil {
- if unwrapError(err) == syscall.ERANGE {
+ if util.Cause(err) == syscall.ERANGE {
attributeSize *= 2
continue
}
- return nil, errors.Wrapf(err, "error getting value of extended attribute %q on %q", attribute, path)
+ return nil, fmt.Errorf("error getting value of extended attribute %q on %q: %w", attribute, path, err)
}
m[attribute] = string(attributeValue[:size])
break
}
if attributeSize >= maxSize {
- return nil, errors.Errorf("unable to read attribute %q of %q: size would have been too big", attribute, path)
+ return nil, fmt.Errorf("unable to read attribute %q of %q: size would have been too big", attribute, path)
}
}
}
@@ -91,7 +93,7 @@ func Lsetxattrs(path string, xattrs map[string]string) error {
for attribute, value := range xattrs {
if isRelevantXattr(attribute) {
if err := unix.Lsetxattr(path, attribute, []byte(value), 0); err != nil {
- return errors.Wrapf(err, "error setting value of extended attribute %q on %q", attribute, path)
+ return fmt.Errorf("error setting value of extended attribute %q on %q: %w", attribute, path, err)
}
}
}
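The surrounding loops embody the classic grow-on-ERANGE retry for xattr syscalls. A standalone sketch of the pattern, using errors.Is where the vendored code calls util.Cause (both end at the raw errno); the size cap and the ENOTSUP/ENOSYS tolerance from the real code are omitted for brevity:

package main

import (
    "errors"
    "fmt"

    "golang.org/x/sys/unix"
)

// listXattrs doubles its buffer whenever the kernel reports ERANGE,
// i.e. the attribute list grew between attempts.
func listXattrs(path string) ([]byte, error) {
    size := 64
    for {
        buf := make([]byte, size)
        n, err := unix.Llistxattr(path, buf)
        if err != nil {
            if errors.Is(err, unix.ERANGE) {
                size *= 2
                continue
            }
            return nil, fmt.Errorf("error listing extended attributes of %q: %w", path, err)
        }
        return buf[:n], nil
    }
}

func main() {
    list, err := listXattrs("/tmp")
    fmt.Println(len(list), err)
}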
diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go
index a31ff0309..501f85ff2 100644
--- a/vendor/github.com/containers/buildah/define/build.go
+++ b/vendor/github.com/containers/buildah/define/build.go
@@ -101,6 +101,8 @@ type CommonBuildOptions struct {
Secrets []string
// SSHSources is the available ssh agent connections to forward in the build
SSHSources []string
+ // OCIHooksDir is the location of OCI hooks for the build containers
+ OCIHooksDir []string
}
// BuildOptions can be used to alter how an image is built.
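For readers wiring up the new field: OCIHooksDir travels with the rest of the per-build options. A sketch, assuming the usual CommonBuildOpts pointer field on BuildOptions and an illustrative hooks path:

package main

import "github.com/containers/buildah/define"

func main() {
    opts := define.BuildOptions{
        CommonBuildOpts: &define.CommonBuildOptions{
            // Path is illustrative; distributions choose their own hook dirs.
            OCIHooksDir: []string{"/etc/containers/oci/hooks.d"},
        },
    }
    _ = opts
}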
diff --git a/vendor/github.com/containers/buildah/define/mount_freebsd.go b/vendor/github.com/containers/buildah/define/mount_freebsd.go
new file mode 100644
index 000000000..ae5ccc5f5
--- /dev/null
+++ b/vendor/github.com/containers/buildah/define/mount_freebsd.go
@@ -0,0 +1,17 @@
+//go:build freebsd
+// +build freebsd
+
+package define
+
+const (
+ // TypeBind is the type for mounting host dir
+ TypeBind = "nullfs"
+
+ // TempDir is the default for storing temporary files
+ TempDir = "/var/tmp"
+)
+
+var (
+ // Mount options for bind
+ BindOptions = []string{}
+)
diff --git a/vendor/github.com/containers/buildah/define/mount_linux.go b/vendor/github.com/containers/buildah/define/mount_linux.go
new file mode 100644
index 000000000..9d59cb6c3
--- /dev/null
+++ b/vendor/github.com/containers/buildah/define/mount_linux.go
@@ -0,0 +1,17 @@
+//go:build linux
+// +build linux
+
+package define
+
+const (
+ // TypeBind is the type for mounting host dir
+ TypeBind = "bind"
+
+ // TempDir is the default for storing temporary files
+ TempDir = "/dev/shm"
+)
+
+var (
+ // Mount options for bind
+ BindOptions = []string{"bind"}
+)
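Together with mount_freebsd.go above, this lets call sites stay platform-neutral: TypeBind resolves to "bind" on Linux and "nullfs" on FreeBSD, and BindOptions carries whatever flags the platform requires. A sketch of the intended usage (destination and source paths are illustrative):

package main

import (
    "fmt"

    "github.com/containers/buildah/define"
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
    m := specs.Mount{
        Destination: "/run/secrets",
        Type:        define.TypeBind, // "bind" on Linux, "nullfs" on FreeBSD
        Source:      "/var/lib/containers/secrets",
        Options:     append([]string{"ro"}, define.BindOptions...),
    }
    fmt.Printf("%+v\n", m)
}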
diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go
index 985558140..07d900811 100644
--- a/vendor/github.com/containers/buildah/define/types.go
+++ b/vendor/github.com/containers/buildah/define/types.go
@@ -3,6 +3,7 @@ package define
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -17,9 +18,9 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/types"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -87,6 +88,8 @@ type IDMappingOptions struct {
HostGIDMapping bool
UIDMap []specs.LinuxIDMapping
GIDMap []specs.LinuxIDMapping
+ AutoUserNs bool
+ AutoUserNsOpts types.AutoUserNsOptions
}
// Secret is a secret source that can be used in a RUN
@@ -120,11 +123,11 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
}
name, err = ioutil.TempDir(dir, prefix)
if err != nil {
- return "", "", errors.Wrapf(err, "error creating temporary directory for %q", url)
+ return "", "", fmt.Errorf("error creating temporary directory for %q: %w", url, err)
}
urlParsed, err := urlpkg.Parse(url)
if err != nil {
- return "", "", errors.Wrapf(err, "error parsing url %q", url)
+ return "", "", fmt.Errorf("error parsing url %q: %w", url, err)
}
if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") {
combinedOutput, gitSubDir, err := cloneToDirectory(url, name)
@@ -132,7 +135,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
if err2 := os.RemoveAll(name); err2 != nil {
logrus.Debugf("error removing temporary directory %q: %v", name, err2)
}
- return "", "", errors.Wrapf(err, "cloning %q to %q:\n%s", url, name, string(combinedOutput))
+ return "", "", fmt.Errorf("cloning %q to %q:\n%s: %w", url, name, string(combinedOutput), err)
}
// Check if git url specifies any subdir
// if subdir is there switch to subdir.
@@ -172,7 +175,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
if err2 := os.Remove(name); err2 != nil {
logrus.Debugf("error removing temporary directory %q: %v", name, err2)
}
- return "", "", errors.Errorf("unreachable code reached")
+ return "", "", errors.New("unreachable code reached")
}
func cloneToDirectory(url, dir string) ([]byte, string, error) {
@@ -207,8 +210,11 @@ func downloadToDirectory(url, dir string) error {
return err
}
defer resp.Body.Close()
+ if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest {
+ return fmt.Errorf("invalid response status %d", resp.StatusCode)
+ }
if resp.ContentLength == 0 {
- return errors.Errorf("no contents in %q", url)
+ return fmt.Errorf("no contents in %q", url)
}
if err := chrootarchive.Untar(resp.Body, dir, nil); err != nil {
resp1, err := http.Get(url)
@@ -223,7 +229,7 @@ func downloadToDirectory(url, dir string) error {
dockerfile := filepath.Join(dir, "Dockerfile")
// Assume this is a Dockerfile
if err := ioutils.AtomicWriteFile(dockerfile, body, 0600); err != nil {
- return errors.Wrapf(err, "Failed to write %q to %q", url, dockerfile)
+ return fmt.Errorf("failed to write %q to %q: %w", url, dockerfile, err)
}
}
return nil
@@ -234,14 +240,14 @@ func stdinToDirectory(dir string) error {
r := bufio.NewReader(os.Stdin)
b, err := ioutil.ReadAll(r)
if err != nil {
- return errors.Wrapf(err, "Failed to read from stdin")
+ return fmt.Errorf("failed to read from stdin: %w", err)
}
reader := bytes.NewReader(b)
if err := chrootarchive.Untar(reader, dir, nil); err != nil {
dockerfile := filepath.Join(dir, "Dockerfile")
// Assume this is a Dockerfile
if err := ioutils.AtomicWriteFile(dockerfile, b, 0600); err != nil {
- return errors.Wrapf(err, "Failed to write bytes to %q", dockerfile)
+ return fmt.Errorf("failed to write bytes to %q: %w", dockerfile, err)
}
}
return nil
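The new status check in downloadToDirectory closes a real gap: a 404 error page would previously have been fed straight into the untar/Dockerfile fallback. The guard generalizes to any fetch-then-parse helper; a minimal sketch:

package main

import (
    "fmt"
    "net/http"
)

// fetch mirrors the new guard above: any status outside [200, 400)
// is rejected before the body is consumed.
func fetch(url string) (*http.Response, error) {
    resp, err := http.Get(url)
    if err != nil {
        return nil, err
    }
    if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest {
        resp.Body.Close()
        return nil, fmt.Errorf("invalid response status %d", resp.StatusCode)
    }
    return resp, nil
}

func main() {
    resp, err := fetch("https://example.com/Dockerfile")
    if err == nil {
        resp.Body.Close()
    }
    fmt.Println(err)
}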
diff --git a/vendor/github.com/containers/buildah/delete.go b/vendor/github.com/containers/buildah/delete.go
index e3bddba20..718316844 100644
--- a/vendor/github.com/containers/buildah/delete.go
+++ b/vendor/github.com/containers/buildah/delete.go
@@ -1,14 +1,12 @@
package buildah
-import (
- "github.com/pkg/errors"
-)
+import "fmt"
// Delete removes the working container. The buildah.Builder object should not
// be used after this method is called.
func (b *Builder) Delete() error {
if err := b.store.DeleteContainer(b.ContainerID); err != nil {
- return errors.Wrapf(err, "error deleting build container %q", b.ContainerID)
+ return fmt.Errorf("error deleting build container %q: %w", b.ContainerID, err)
}
b.MountPoint = ""
b.Container = ""
diff --git a/vendor/github.com/containers/buildah/digester.go b/vendor/github.com/containers/buildah/digester.go
index 870ab8d98..9455e3680 100644
--- a/vendor/github.com/containers/buildah/digester.go
+++ b/vendor/github.com/containers/buildah/digester.go
@@ -2,6 +2,7 @@ package buildah
import (
"archive/tar"
+ "errors"
"fmt"
"hash"
"io"
@@ -9,7 +10,6 @@ import (
"time"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
type digester interface {
@@ -68,14 +68,14 @@ func (t *tarFilterer) Close() error {
t.closedLock.Lock()
if t.closed {
t.closedLock.Unlock()
- return errors.Errorf("tar filter is already closed")
+ return errors.New("tar filter is already closed")
}
t.closed = true
t.closedLock.Unlock()
err := t.pipeWriter.Close()
t.wg.Wait()
if err != nil {
- return errors.Wrapf(err, "error closing filter pipe")
+ return fmt.Errorf("error closing filter pipe: %w", err)
}
return t.err
}
@@ -110,7 +110,7 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
if !skip {
err = tarWriter.WriteHeader(hdr)
if err != nil {
- err = errors.Wrapf(err, "error filtering tar header for %q", hdr.Name)
+ err = fmt.Errorf("error filtering tar header for %q: %w", hdr.Name, err)
break
}
if hdr.Size != 0 {
@@ -122,11 +122,11 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
n, copyErr = io.Copy(tarWriter, tarReader)
}
if copyErr != nil {
- err = errors.Wrapf(copyErr, "error copying content for %q", hdr.Name)
+ err = fmt.Errorf("error copying content for %q: %w", hdr.Name, copyErr)
break
}
if n != hdr.Size {
- err = errors.Errorf("error filtering content for %q: expected %d bytes, got %d bytes", hdr.Name, hdr.Size, n)
+ err = fmt.Errorf("error filtering content for %q: expected %d bytes, got %d bytes", hdr.Name, hdr.Size, n)
break
}
}
@@ -134,7 +134,7 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
hdr, err = tarReader.Next()
}
if err != io.EOF {
- filterer.err = errors.Wrapf(err, "error reading tar archive")
+ filterer.err = fmt.Errorf("error reading tar archive: %w", err)
break
}
filterer.closedLock.Lock()
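The filterer's n != hdr.Size check is what keeps the digest trustworthy: a short copy means the tar stream's header lied about its payload. The same defensive loop, extracted into a standalone sketch:

package main

import (
    "archive/tar"
    "fmt"
    "io"
    "os"
)

// copyEntries re-emits a tar stream entry by entry, rejecting any entry
// whose copied length disagrees with its header, as the filterer above does.
func copyEntries(tr *tar.Reader, tw *tar.Writer) error {
    hdr, err := tr.Next()
    for err == nil {
        if werr := tw.WriteHeader(hdr); werr != nil {
            return fmt.Errorf("error filtering tar header for %q: %w", hdr.Name, werr)
        }
        n, cerr := io.Copy(tw, tr)
        if cerr != nil {
            return fmt.Errorf("error copying content for %q: %w", hdr.Name, cerr)
        }
        if n != hdr.Size {
            return fmt.Errorf("error filtering content for %q: expected %d bytes, got %d bytes", hdr.Name, hdr.Size, n)
        }
        hdr, err = tr.Next()
    }
    if err != io.EOF {
        return fmt.Errorf("error reading tar archive: %w", err)
    }
    return nil
}

func main() {
    tw := tar.NewWriter(os.Stdout)
    defer tw.Close()
    if err := copyEntries(tar.NewReader(os.Stdin), tw); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}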
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 1fc8c6016..652f09112 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -5,10 +5,10 @@ go 1.17
require (
github.com/containerd/containerd v1.6.6
github.com/containernetworking/cni v1.1.1
- github.com/containers/common v0.48.1-0.20220608111710-dbecabbe82c9
- github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471
- github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f
- github.com/containers/storage v1.41.1-0.20220607143333-8951d0153bf6
+ github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a
+ github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c
+ github.com/containers/ocicrypt v1.1.5
+ github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a
github.com/docker/distribution v2.8.1+incompatible
github.com/docker/docker v20.10.17+incompatible
github.com/docker/go-units v0.4.0
@@ -19,23 +19,22 @@ require (
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.19.0
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
+ github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
github.com/opencontainers/runc v1.1.3
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.10.1
- github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805
- github.com/pkg/errors v0.9.1
- github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646
+ github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df
+ github.com/seccomp/libseccomp-golang v0.10.0
github.com/sirupsen/logrus v1.8.1
- github.com/spf13/cobra v1.4.0
+ github.com/spf13/cobra v1.5.0
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.7.2
+ github.com/stretchr/testify v1.8.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
go.etcd.io/bbolt v1.3.6
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
- golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
+ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
+ golang.org/x/sys v0.0.0-20220624220833-87e55d714810
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
)
@@ -51,7 +50,7 @@ require (
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect
github.com/containerd/cgroups v1.0.3 // indirect
- github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect
+ github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
github.com/containernetworking/plugins v1.1.1 // indirect
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
@@ -60,20 +59,22 @@ require (
github.com/docker/docker-credential-helpers v0.6.4 // indirect
github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
- github.com/fsnotify/fsnotify v1.4.9 // indirect
+ github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
+ github.com/google/go-containerregistry v0.10.0 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
- github.com/imdario/mergo v0.3.12 // indirect
+ github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jinzhu/copier v0.3.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.15.6 // indirect
+ github.com/klauspost/compress v1.15.8 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
+ github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e // indirect
github.com/manifoldco/promptui v0.9.0 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
@@ -87,19 +88,23 @@ require (
github.com/morikuni/aec v1.0.0 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
+ github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/proglottis/gpgme v0.1.2 // indirect
- github.com/prometheus/client_golang v1.11.1 // indirect
+ github.com/proglottis/gpgme v0.1.3 // indirect
+ github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
- github.com/prometheus/common v0.30.0 // indirect
+ github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
+ github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
- github.com/sylabs/sif/v2 v2.7.0 // indirect
+ github.com/sylabs/sif/v2 v2.7.1 // indirect
github.com/tchap/go-patricia v2.3.0+incompatible // indirect
+ github.com/theupdateframework/go-tuf v0.3.0 // indirect
+ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/vbatts/tar-split v0.11.2 // indirect
- github.com/vbauerster/mpb/v7 v7.4.1 // indirect
+ github.com/vbauerster/mpb/v7 v7.4.2 // indirect
github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 // indirect
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
@@ -107,12 +112,12 @@ require (
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
go.opencensus.io v0.23.0 // indirect
- golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
+ golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
golang.org/x/text v0.3.7 // indirect
- google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8 // indirect
- google.golang.org/grpc v1.44.0 // indirect
+ google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
+ google.golang.org/grpc v1.47.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
- gopkg.in/square/go-jose.v2 v2.5.1 // indirect
+ gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index 61bffdb64..cdd6cf3b4 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -20,15 +20,35 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -38,11 +58,13 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v66.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
@@ -51,12 +73,20 @@ github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
+github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
@@ -65,6 +95,10 @@ github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
@@ -96,11 +130,12 @@ github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:m
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/ProtonMail/go-crypto v0.0.0-20220517143526-88bb52951d5b/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/ReneKroon/ttlcache/v2 v2.11.0/go.mod h1:mBxvsNY+BT8qLLd6CuAJubbKo6r0jh3nb5et22bbfGY=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
@@ -117,9 +152,26 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.44.44/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go-v2 v1.16.5/go.mod h1:Wh7MEsmEApyL5hrWzpDkba4gwAPc5/piwLVLFnCxp48=
+github.com/aws/aws-sdk-go-v2/config v1.15.11/go.mod h1:mD5tNFciV7YHNjPpFYqJ6KGpoSfY107oZULvTHIxtbI=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.6/go.mod h1:mQgnRmBPF2S/M01W4T4Obp3ZaZB6o1s/R8cOUda9vtI=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.6/go.mod h1:ClLMcuQA/wcHPmOIfNzNI4Y1Q0oDbmEkbYhMFOzHDh8=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12/go.mod h1:Afj/U8svX6sJ77Q+FPWMzabJ9QjbwP32YlopgKALUpg=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6/go.mod h1:FwpAKI+FBPIELJIdmQzlLtRe8LQSOreMcM2wBsPMvvc=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.13/go.mod h1:hiM/y1XPp3DoEPhoVEYc/CZcS58dP6RKJRDFp99wdX0=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6/go.mod h1:DxAPjquoEHf3rUHh1b9+47RAaXB8/7cB6jkzCt/GOEI=
+github.com/aws/aws-sdk-go-v2/service/kms v1.17.3/go.mod h1:EKkrWWXwWYf8x3Nrm6Oix3zZP9NRBHqxw5buFGVBHA0=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.9/go.mod h1:UqRD9bBt15P0ofRyDZX6CfsIqPpzeHOhZKWzgSuAzpo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.7/go.mod h1:lVxTdiiSHY3jb1aeg+BBFtDzZGSUCv6qaNOyEGCJ1AY=
+github.com/aws/smithy-go v1.11.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/beeker1121/goque v1.0.3-0.20191103205551-d618510128af/go.mod h1:84CWnaDz4g1tEVnFLnuBigmGK15oPohy0RfvSN8d4eg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -140,6 +192,8 @@ github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx2
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -166,6 +220,8 @@ github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/cilium/ebpf v0.9.0/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -174,11 +230,14 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
@@ -257,8 +316,9 @@ github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oM
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
-github.com/containerd/stargz-snapshotter/estargz v0.11.4 h1:LjrYUZpyOhiSaU7hHrdR82/RBoxfGWSaC0VeSSMXqnk=
github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
+github.com/containerd/stargz-snapshotter/estargz v0.12.0 h1:idtwRTLjk2erqiYhPWy2L844By8NRFYEwYHcXhoIWPM=
+github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -285,11 +345,11 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD
github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
-github.com/containers/common v0.48.1-0.20220608111710-dbecabbe82c9 h1:sK+TNC8oUBkruZTIqwYJrENetSLQnk+goBVyLiqsJq8=
-github.com/containers/common v0.48.1-0.20220608111710-dbecabbe82c9/go.mod h1:WBLwq+i7bicCpH54V70HM6s7jqDAESTlYnd05XXp0ac=
-github.com/containers/image/v5 v5.21.2-0.20220511203756-fe4fd4ed8be4/go.mod h1:OsX9sFexyGF0FCNAjfcVFv3IwMqDyLyV/WQY/roLPcE=
-github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471 h1:2mm1jEFATvpdFfp8lUB/yc237OqwruMvfIPiVn1Wpgg=
-github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471/go.mod h1:KntCBNQn3qOuZmQuJ38ORyTozmWXiuo05Vef2S0Sm5M=
+github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a h1:kdcruVl641VTIm8C3O58WRYcBTbnWCsh6AJymk28ScM=
+github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a/go.mod h1:1dA7JPGoSi83kjf5H4NIrGANyLOULyvFqV1bwvYFEek=
+github.com/containers/image/v5 v5.21.2-0.20220712113758-29aec5f7bbbf/go.mod h1:0+N0ZM9mgMmoZZc6uNcgnEsbX85Ne7b29cIW5lqWwVU=
+github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c h1:ms1Vyzs9Eb17J38aFKrL0+ig2pVwQq3OleaO7VmQuV0=
+github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c/go.mod h1:ykVAVRj4DhQNMHZDVU+KCtXjWBKpqiUe669eF0WBEEc=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -297,13 +357,13 @@ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgU
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
-github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f h1:hffElEaoDQfREHltc2wtFPd68BqDmzW6KkEDpuSRBjs=
-github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
+github.com/containers/ocicrypt v1.1.5 h1:UO+gBnBXvMvC7HTXLh0bPgLslfW8HlY+oxYcoSHBcZQ=
+github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
-github.com/containers/storage v1.40.2/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
github.com/containers/storage v1.41.0/go.mod h1:Pb0l5Sm/89kolX3o2KolKQ5cCHk5vPNpJrhNaLcdS5s=
-github.com/containers/storage v1.41.1-0.20220607143333-8951d0153bf6 h1:AWGEIiqWFIfzTIv4Q3k6vJt/EYyo8dh35ny7WhnOd0s=
-github.com/containers/storage v1.41.1-0.20220607143333-8951d0153bf6/go.mod h1:6XQ68cEG8ojfP/m3HIupFV1rZsnqeFmaE8N1ctBP94Y=
+github.com/containers/storage v1.41.1-0.20220712184034-d26be7b27860/go.mod h1:uu6HCcijN30xRxW1ZuZRngwFGOlH5NpBWYiNBnDQNRw=
+github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a h1:+arJAP0v8kEy5fKRPIELjarjpwUHhB7SyRE0uFXlyKY=
+github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a/go.mod h1:4DfR+cPpkXKhJnnyydD3z82DXrnTBT63y1k0QWtM2i4=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -311,11 +371,13 @@ github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmeka
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-oidc/v3 v3.2.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
@@ -324,6 +386,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
@@ -342,11 +405,14 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.16+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -354,7 +420,6 @@ github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.15+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.16+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@@ -378,6 +443,7 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/eggsampler/acme/v3 v3.2.1/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@@ -389,18 +455,36 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
+github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
+github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
+github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01/go.mod h1:ypD5nozFk9vcGw1ATYefw6jHe/jZP++Z15/+VTMcWhc=
+github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
+github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52/go.mod h1:yIquW87NGRw1FU5p5lEkpnt/QxoH5uPAOUlOVkAUuMg=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsouza/go-dockerclient v1.7.7/go.mod h1:njNCXvoZj3sLPjf3yO0DPHf1mdLdCPDYPc14GskKA4Y=
github.com/fsouza/go-dockerclient v1.8.1 h1:a27vHYqNSZz88nUAurI1o6W5PgEt63nAWilOI+j63RE=
github.com/fsouza/go-dockerclient v1.8.1/go.mod h1:zmA2ogSxRnXmbZcy0Aq7yhRoCdP/bDns/qghCK9SWtM=
@@ -410,13 +494,18 @@ github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49P
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.7.1/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY=
+github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
@@ -442,8 +531,43 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
+github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
+github.com/go-rod/rod v0.107.3/go.mod h1:4SqYRUrcc4dSr9iT36YRZ4hdUAPg3A0O8RhxAMh0eCQ=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
+github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/gobuffalo/attrs v0.1.0/go.mod h1:fmNpaWyHM0tRm8gCZWKx8yY9fvaNLo2PyzBNSrBZ5Hw=
+github.com/gobuffalo/envy v1.8.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w=
+github.com/gobuffalo/envy v1.9.0/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w=
+github.com/gobuffalo/fizz v1.10.0/go.mod h1:J2XGPO0AfJ1zKw7+2BA+6FEGAkyEsdCOLvN93WCT2WI=
+github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
+github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
+github.com/gobuffalo/flect v0.2.1/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
+github.com/gobuffalo/genny/v2 v2.0.5/go.mod h1:kRkJuAw9mdI37AiEYjV4Dl+TgkBDYf8HZVjLkqe5eBg=
+github.com/gobuffalo/github_flavored_markdown v1.1.0/go.mod h1:TSpTKWcRTI0+v7W3x8dkSKMLJSUpuVitlptCkpeY8ic=
+github.com/gobuffalo/helpers v0.6.0/go.mod h1:pncVrer7x/KRvnL5aJABLAuT/RhKRR9klL6dkUOhyv8=
+github.com/gobuffalo/helpers v0.6.1/go.mod h1:wInbDi0vTJKZBviURTLRMFLE4+nF2uRuuL2fnlYo7w4=
+github.com/gobuffalo/logger v1.0.3/go.mod h1:SoeejUwldiS7ZsyCBphOGURmWdwUFXs0J7TCjEhjKxM=
+github.com/gobuffalo/nulls v0.2.0/go.mod h1:w4q8RoSCEt87Q0K0sRIZWYeIxkxog5mh3eN3C/n+dUc=
+github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q=
+github.com/gobuffalo/packd v1.0.0/go.mod h1:6VTc4htmJRFB7u1m/4LeMTWjFoYrUiBkU9Fdec9hrhI=
+github.com/gobuffalo/packr/v2 v2.8.0/go.mod h1:PDk2k3vGevNE3SwVyVRgQCCXETC9SaONCNSXT1Q8M1g=
+github.com/gobuffalo/plush/v4 v4.0.0/go.mod h1:ErFS3UxKqEb8fpFJT7lYErfN/Nw6vHGiDMTjxpk5bQ0=
+github.com/gobuffalo/pop/v5 v5.3.1/go.mod h1:vcEDhh6cJ3WVENqJDFt/6z7zNb7lLnlN8vj3n5G9rYA=
+github.com/gobuffalo/tags/v3 v3.0.2/go.mod h1:ZQeN6TCTiwAFnS0dNcbDtSgZDwNKSpqajvVtt6mlYpA=
+github.com/gobuffalo/tags/v3 v3.1.0/go.mod h1:ZQeN6TCTiwAFnS0dNcbDtSgZDwNKSpqajvVtt6mlYpA=
+github.com/gobuffalo/validate/v3 v3.0.0/go.mod h1:HFpjq+AIiA2RHoQnQVTFKF/ZpUPXwyw82LgyDPxQ9r0=
+github.com/gobuffalo/validate/v3 v3.1.0/go.mod h1:HFpjq+AIiA2RHoQnQVTFKF/ZpUPXwyw82LgyDPxQ9r0=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
@@ -451,6 +575,7 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -460,6 +585,8 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -495,9 +622,13 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/certificate-transparency-go v1.0.22-0.20181127102053-c25855a82c75/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -510,9 +641,12 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
+github.com/google/go-containerregistry v0.10.0 h1:qd/fv2nQajGZJenaNcdaghlwSPjQ0NphN9hzArr2WWg=
+github.com/google/go-containerregistry v0.10.0/go.mod h1:C7uwbB1QUAtvnknyd3ethxJRd4gtEjU/9WLXzckfI1Y=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -521,6 +655,7 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -533,6 +668,9 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -540,16 +678,23 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@@ -569,26 +714,61 @@ github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FK
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
+github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/vault/api v1.7.2/go.mod h1:xbfA+1AvxFseDzxxdWaL0uO99n1+tndus4GCrtouy0M=
+github.com/hashicorp/vault/sdk v0.5.1/go.mod h1:DoGraE9kKGNcVgPmTuX357Fm6WAx1Okvde8Vp3dPDoU=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
+github.com/honeycombio/beeline-go v1.1.1 h1:sU8r4ae34uEL3/CguSl8Mr+Asz9DL1nfH9Wwk85Pc7U=
+github.com/honeycombio/beeline-go v1.1.1/go.mod h1:kN0cfUGBMfA87DyCYbiiLoSzWsnw3bluZvNEWtatHxk=
+github.com/honeycombio/libhoney-go v1.15.2 h1:5NGcjOxZZma13dmzNcl3OtGbF1hECA0XHJNHEb2t2ck=
+github.com/honeycombio/libhoney-go v1.15.2/go.mod h1:JzhRPYgoBCd0rZvudrqmej4Ntx0w7AT3wAJpf5+t1WA=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -596,24 +776,66 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.6.0/go.mod h1:yeseQo4xhQbgyJs2c87RAXOH2i624N0Fh1KSPJya7qo=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik=
+github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=
+github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo=
+github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -623,18 +845,22 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/karrick/godirwalk v1.15.3/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY=
-github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.8 h1:JahtItbkWjf2jzm/T+qgMxkP9EMHsqEUA6vCMGmXvhA=
+github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -648,10 +874,23 @@ github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/labstack/echo/v4 v4.3.0/go.mod h1:PvmtTvhVqKDzDQy4d3bWzPjZLzom4iQbAZy2sgZ/qI8=
+github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e h1:1aV3EJ4ZMsc63MFU4rB+ccSEhZvvVD71T9RA4Rqd3hI=
+github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e/go.mod h1:Bl3mfF2LHYepsU2XfzMceIglyByfPe1IFAXtO+p37Qk=
+github.com/letsencrypt/challtestsrv v1.2.1/go.mod h1:Ur4e4FvELUXLGhkMztHOsPIsvGxD/kzSJninOrkM+zc=
+github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/luna-duclos/instrumentedsql v1.1.3/go.mod h1:9J1njvFds+zN7y85EDhN9XNQLANWwZt2ULeIC8yMNYs=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -661,10 +900,28 @@ github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
+github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc=
+github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
@@ -672,25 +929,42 @@ github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq
github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
+github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/dns v1.1.45/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
@@ -718,6 +992,7 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -730,11 +1005,14 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -746,11 +1024,13 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY=
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
@@ -758,9 +1038,12 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -799,16 +1082,20 @@ github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w=
github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
-github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805 h1:bfLqBGqF04GAoqbMkSrd5VPk/t66OnP0bTTisMaF4ro=
-github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ=
+github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df h1:vf6pdI10F2Tim5a9JKiVVl4/dpNz1OEhz4EnfLdLtiA=
+github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -817,19 +1104,21 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
-github.com/proglottis/gpgme v0.1.2 h1:dKlhDqJ0kdEt+YHCD8FQEUdF9cJj/+mbJUNyUGNAEzY=
-github.com/proglottis/gpgme v0.1.2/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
+github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
+github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -841,10 +1130,12 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug=
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -864,11 +1155,18 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
+github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
@@ -879,36 +1177,50 @@ github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624
github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds=
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
+github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U=
+github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
+github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 h1:5TPCWtlOsaCiuAaglfZX7obd+/kuE8lGUhsVQzmQSaI=
+github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1/go.mod h1:y83NePRM98MJpbGgBgi54UZduhG0aD7lYngAVCx+i/E=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
+github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -925,6 +1237,7 @@ github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -933,23 +1246,33 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
-github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/sylabs/sif/v2 v2.7.0 h1:VFzN8alnJ/3n1JA0K9DyUtfSzezWgWrzLDcYGhgBskk=
-github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
+github.com/sylabs/sif/v2 v2.7.1 h1:XXt9AP39sQfsMCGOGQ/XP9H47yqZOvAonalkaCaNIYM=
+github.com/sylabs/sif/v2 v2.7.1/go.mod h1:bBse2nEFd3yHkmq6KmAOFEWQg5LdFYiQUdVcgamxlc8=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/theupdateframework/go-tuf v0.3.0 h1:od2sc5+BSkKZhmUG2o2rmruy0BGSmhrbDhCnpxh87X8=
+github.com/theupdateframework/go-tuf v0.3.0/go.mod h1:E5XP0wXitrFUHe4b8cUcAAdxBW4LbfnqF4WXXGLgWNo=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@@ -958,10 +1281,13 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.9/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
+github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
-github.com/vbauerster/mpb/v7 v7.4.1 h1:NhLMWQ3gNg2KJR8oeA9lO8Xvq+eNPmixDmB6JEQOUdA=
-github.com/vbauerster/mpb/v7 v7.4.1/go.mod h1:Ygg2mV9Vj9sQBWqsK2m2pidcf9H3s6bNKtqd3/M4gBo=
+github.com/vbauerster/mpb/v7 v7.4.2 h1:n917F4d8EWdUKc9c81wFkksyG6P6Mg7IETfKCE1Xqng=
+github.com/vbauerster/mpb/v7 v7.4.2/go.mod h1:UmOiIUI8aPqWXIps0ciik3RKMdzx7+ooQpq+fBcXwBA=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
@@ -972,6 +1298,12 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U=
+github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
+github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY=
+github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
+github.com/weppos/publicsuffix-go v0.15.1-0.20210807195340-dc689ff0bb59/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
+github.com/weppos/publicsuffix-go v0.15.1-0.20220329081811-9a40b608a236/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -984,6 +1316,11 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
+github.com/ysmood/got v0.31.2/go.mod h1:pE1l4LOwOBhQg6A/8IAatkGp7uZjnalzrZolnlhhMgY=
+github.com/ysmood/gotrace v0.6.0/go.mod h1:TzhIG7nHDry5//eYZDYcTzuJLYQIkykJzCRIo4/dzQM=
+github.com/ysmood/gson v0.7.1/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
+github.com/ysmood/leakless v0.7.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -993,6 +1330,12 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
+github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
+github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is=
+github.com/zmap/zcrypto v0.0.0-20210811211718-6f9bc4aff20f/go.mod h1:y/9hjFEub4DtQxTHp/pqticBgdYeCwL97vojV3lsvHY=
+github.com/zmap/zlint/v3 v3.3.1-0.20211019173530-cb17369b4628/go.mod h1:O+4OXRfNLKqOyDl4eKZ1SBlYudKGUBGRFcv+m1KLr28=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
@@ -1016,54 +1359,74 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
+go.opentelemetry.io/contrib/propagators v0.19.0 h1:HrixVNZYFjUl/Db+Tr3DhqzLsVW9GeVf/Gye+C5dNUY=
+go.opentelemetry.io/contrib/propagators v0.19.0/go.mod h1:4QOdZClXISU5S43xZxk5tYaWcpb+lehqfKtE6PK6msE=
+go.opentelemetry.io/otel v0.19.0/go.mod h1:j9bF567N9EfomkSidSfmMwIwIBuP37AMAIzVW85OxSg=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y=
go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
+go.opentelemetry.io/otel/metric v0.19.0/go.mod h1:8f9fglJPRnXuskQmKpnad31lcLJ2VmNNqIsx/uIwBSc=
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/oteltest v0.19.0/go.mod h1:tI4yxwh8U21v7JD6R3BcA/2+RBoTKFexE/PJ/nSO7IA=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/trace v0.19.0/go.mod h1:4IXiNextNOpPnRlI4ryK69mn5iC84bjBWZQA5DXz/qg=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY=
go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+goji.io/v3 v3.0.0/go.mod h1:c02FFnNiVNCDo+DpR2IhBQpM9r5G1BG/MkHNTPUJ13U=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1102,6 +1465,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1134,12 +1498,14 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -1153,16 +1519,26 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220524220425-1d687d428aca/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1175,7 +1551,16 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1186,8 +1571,10 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1197,7 +1584,9 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1213,11 +1602,13 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1227,6 +1618,7 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1247,6 +1639,7 @@ golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1265,6 +1658,8 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1275,25 +1670,41 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810 h1:rHZQSjJdAI4Xf5Qzeh2bBc5YJIkPFVM6oDtMFYmgws0=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1317,9 +1728,11 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M=
+golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1331,6 +1744,7 @@ golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
@@ -1340,6 +1754,7 @@ golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1352,6 +1767,7 @@ golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -1360,6 +1776,7 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -1380,16 +1797,24 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -1412,14 +1837,35 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
+google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1465,13 +1911,49 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8 h1:U9V52f6rAgINH7kT+musA1qF8kWyVOxzF8eYuOVuFwQ=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f h1:hJ/Y5SqPXbarffmAsApliUlcvMU+wScNGfyop4bZm8o=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1495,12 +1977,22 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1518,6 +2010,8 @@ google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscL
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
+gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1529,14 +2023,17 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
+gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
+gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
@@ -1552,6 +2049,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 3c7bea432..ec427da1c 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -5,6 +5,7 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -28,7 +29,6 @@ import (
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -167,7 +167,7 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
var uidMap, gidMap []idtools.IDMap
mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error mounting container %q", i.containerID)
+ return nil, nil, fmt.Errorf("error mounting container %q: %w", i.containerID, err)
}
pipeReader, pipeWriter := io.Pipe()
errChan := make(chan error, 1)
@@ -190,11 +190,11 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
}()
return ioutils.NewReadCloserWrapper(pipeReader, func() error {
if err = pipeReader.Close(); err != nil {
- err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID)
+ err = fmt.Errorf("error closing tar archive of container %q: %w", i.containerID, err)
}
if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
if err2 != nil {
- err2 = errors.Wrapf(err2, "error unmounting container %q", i.containerID)
+ err2 = fmt.Errorf("error unmounting container %q: %w", i.containerID, err2)
}
err = err2
}
@@ -282,7 +282,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
manifestType := i.preferredManifestType
// If it's not a format we support, return an error.
if manifestType != v1.MediaTypeImageManifest && manifestType != manifest.DockerV2Schema2MediaType {
- return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
+ return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType)
}
// Start building the list of layers using the read-write layer.
@@ -290,7 +290,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
layerID := i.layerID
layer, err := i.store.Layer(layerID)
if err != nil {
- return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
+ return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
}
// Walk the list of parent layers, prepending each as we go. If we're squashing,
// stop at the layer ID of the top layer, which we won't really be using anyway.
@@ -303,7 +303,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
layer, err = i.store.Layer(layerID)
if err != nil {
- return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
+ return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
}
}
logrus.Debugf("layer list: %q", layers)
@@ -311,7 +311,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Make a temporary directory to hold blobs.
path, err := ioutil.TempDir(os.TempDir(), define.Package)
if err != nil {
- return nil, errors.Wrapf(err, "error creating temporary directory to hold layer blobs")
+ return nil, fmt.Errorf("error creating temporary directory to hold layer blobs: %w", err)
}
logrus.Debugf("using %q to hold temporary data", path)
defer func() {
@@ -343,7 +343,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Look up this layer.
layer, err := i.store.Layer(layerID)
if err != nil {
- return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
+ return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err)
}
// If we're up to the final layer, but we don't want to include
// a diff for it, we're done.
@@ -400,7 +400,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Extract this layer, one of possibly many.
rc, err = i.store.Diff("", layerID, diffOptions)
if err != nil {
- return nil, errors.Wrapf(err, "error extracting %s", what)
+ return nil, fmt.Errorf("error extracting %s: %w", what, err)
}
}
srcHasher := digest.Canonical.Digester()
@@ -408,7 +408,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
rc.Close()
- return nil, errors.Wrapf(err, "error opening file for %s", what)
+ return nil, fmt.Errorf("error opening file for %s: %w", what, err)
}
counter := ioutils.NewWriteCounter(layerFile)
@@ -427,7 +427,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if err != nil {
layerFile.Close()
rc.Close()
- return nil, errors.Wrapf(err, "error compressing %s", what)
+ return nil, fmt.Errorf("error compressing %s: %w", what, err)
}
writer := io.MultiWriter(writeCloser, srcHasher.Hash())
// Use specified timestamps in the layer, if we're doing that for
@@ -468,11 +468,11 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
if err != nil {
- return nil, errors.Wrapf(err, "error storing %s to file", what)
+ return nil, fmt.Errorf("error storing %s to file: %w", what, err)
}
if i.compression == archive.Uncompressed {
if size != counter.Count {
- return nil, errors.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
+ return nil, fmt.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
}
} else {
size = counter.Count
@@ -481,7 +481,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Rename the layer so that we can more easily find it by digest later.
finalBlobName := filepath.Join(path, destHasher.Digest().String())
if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
- return nil, errors.Wrapf(err, "error storing %s to file while renaming %q to %q", what, filepath.Join(path, "layer"), finalBlobName)
+ return nil, fmt.Errorf("error storing %s to file while renaming %q to %q: %w", what, filepath.Join(path, "layer"), finalBlobName, err)
}
// Add a note in the manifest about the layer. The blobs are identified by their possibly-
// compressed blob digests.
@@ -574,11 +574,11 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if baseImageHistoryLen != 0 {
expectedDiffIDs := expectedOCIDiffIDs(oimage)
if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
- return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
+ return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
}
expectedDiffIDs = expectedDockerDiffIDs(dimage)
if len(dimage.RootFS.DiffIDs) != expectedDiffIDs {
- return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
+ return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
}
}
}
@@ -586,7 +586,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Encode the image configuration blob.
oconfig, err := json.Marshal(&oimage)
if err != nil {
- return nil, errors.Wrapf(err, "error encoding %#v as json", oimage)
+ return nil, fmt.Errorf("error encoding %#v as json: %w", oimage, err)
}
logrus.Debugf("OCIv1 config = %s", oconfig)
@@ -598,14 +598,14 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Encode the manifest.
omanifestbytes, err := json.Marshal(&omanifest)
if err != nil {
- return nil, errors.Wrapf(err, "error encoding %#v as json", omanifest)
+ return nil, fmt.Errorf("error encoding %#v as json: %w", omanifest, err)
}
logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)
// Encode the image configuration blob.
dconfig, err := json.Marshal(&dimage)
if err != nil {
- return nil, errors.Wrapf(err, "error encoding %#v as json", dimage)
+ return nil, fmt.Errorf("error encoding %#v as json: %w", dimage, err)
}
logrus.Debugf("Docker v2s2 config = %s", dconfig)
@@ -617,7 +617,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Encode the manifest.
dmanifestbytes, err := json.Marshal(&dmanifest)
if err != nil {
- return nil, errors.Wrapf(err, "error encoding %#v as json", dmanifest)
+ return nil, fmt.Errorf("error encoding %#v as json: %w", dmanifest, err)
}
logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)
@@ -654,7 +654,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
func (i *containerImageRef) NewImageDestination(ctx context.Context, sc *types.SystemContext) (types.ImageDestination, error) {
- return nil, errors.Errorf("can't write to a container")
+ return nil, errors.New("can't write to a container")
}
func (i *containerImageRef) DockerReference() reference.Named {
@@ -688,7 +688,7 @@ func (i *containerImageRef) Transport() types.ImageTransport {
func (i *containerImageSource) Close() error {
err := os.RemoveAll(i.path)
if err != nil {
- return errors.Wrapf(err, "error removing layer blob directory")
+ return fmt.Errorf("error removing layer blob directory: %w", err)
}
return nil
}
@@ -754,13 +754,13 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo,
}
if err != nil || layerReadCloser == nil || size == -1 {
logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
- return nil, -1, errors.Wrap(err, "error opening layer blob")
+ return nil, -1, fmt.Errorf("error opening layer blob: %w", err)
}
logrus.Debugf("reading layer %q", blob.Digest.String())
closer := func() error {
logrus.Debugf("finished reading layer %q", blob.Digest.String())
if err := layerReadCloser.Close(); err != nil {
- return errors.Wrapf(err, "error closing layer %q after reading", blob.Digest.String())
+ return fmt.Errorf("error closing layer %q after reading: %w", blob.Digest.String(), err)
}
return nil
}
@@ -771,7 +771,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
var name reference.Named
container, err := b.store.Container(b.ContainerID)
if err != nil {
- return nil, errors.Wrapf(err, "error locating container %q", b.ContainerID)
+ return nil, fmt.Errorf("error locating container %q: %w", b.ContainerID, err)
}
if len(container.Names) > 0 {
if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil {
@@ -788,11 +788,11 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
}
oconfig, err := json.Marshal(&b.OCIv1)
if err != nil {
- return nil, errors.Wrapf(err, "error encoding OCI-format image configuration %#v", b.OCIv1)
+ return nil, fmt.Errorf("error encoding OCI-format image configuration %#v: %w", b.OCIv1, err)
}
dconfig, err := json.Marshal(&b.Docker)
if err != nil {
- return nil, errors.Wrapf(err, "error encoding docker-format image configuration %#v", b.Docker)
+ return nil, fmt.Errorf("error encoding docker-format image configuration %#v: %w", b.Docker, err)
}
var created *time.Time
if options.HistoryTimestamp != nil {
@@ -848,7 +848,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
func (b *Builder) ExtractRootfs(options CommitOptions, opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
src, err := b.makeContainerImageRef(options)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error creating image reference for container %q to extract its contents", b.ContainerID)
+ return nil, nil, fmt.Errorf("error creating image reference for container %q to extract its contents: %w", b.ContainerID, err)
}
return src.extractRootfs(opts)
}
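The image.go hunks above are one mechanical migration: github.com/pkg/errors is dropped in favor of the standard library, with errors.Wrapf(err, "...") becoming fmt.Errorf("...: %w", err), errors.Errorf becoming fmt.Errorf or errors.New, and errors.Cause comparisons becoming errors.Is. A minimal runnable sketch of the pattern (mountContainer and errNotMounted are hypothetical stand-ins for the store calls in the real code):

package main

import (
	"errors"
	"fmt"
)

var errNotMounted = errors.New("not mounted")

// mountContainer stands in for a containers/storage call that can fail.
func mountContainer(id string) (string, error) {
	return "", errNotMounted
}

func mountPoint(id string) (string, error) {
	mp, err := mountContainer(id)
	if err != nil {
		// Stdlib equivalent of errors.Wrapf(err, "error mounting container %q", id);
		// the %w verb keeps the cause inspectable by callers.
		return "", fmt.Errorf("error mounting container %q: %w", id, err)
	}
	return mp, nil
}

func main() {
	_, err := mountPoint("c1")
	// errors.Is replaces errors.Cause(err) == errNotMounted.
	fmt.Println(errors.Is(err, errNotMounted)) // true
}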
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 95bdc54ed..e098db473 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -3,6 +3,7 @@ package imagebuildah
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -33,7 +34,6 @@ import (
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/openshift/imagebuilder"
"github.com/openshift/imagebuilder/dockerfile/parser"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/semaphore"
)
@@ -68,10 +68,10 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
if len(paths) == 0 {
- return "", nil, errors.Errorf("error building: no dockerfiles specified")
+ return "", nil, errors.New("error building: no dockerfiles specified")
}
if len(options.Platforms) > 1 && options.IIDFile != "" {
- return "", nil, errors.Errorf("building multiple images, but iidfile %q can only be used to store one image ID", options.IIDFile)
+ return "", nil, fmt.Errorf("building multiple images, but iidfile %q can only be used to store one image ID", options.IIDFile)
}
logger := logrus.New()
@@ -94,7 +94,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
continue
}
if _, err := util.VerifyTagName(tag); err != nil {
- return "", nil, errors.Wrapf(err, "tag %s", tag)
+ return "", nil, fmt.Errorf("tag %s: %w", tag, err)
}
}
@@ -109,7 +109,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
if resp.ContentLength == 0 {
resp.Body.Close()
- return "", nil, errors.Errorf("no contents in %q", dfile)
+ return "", nil, fmt.Errorf("no contents in %q", dfile)
}
data = resp.Body
} else {
@@ -127,31 +127,22 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
var contents *os.File
- // If given a directory, add '/Dockerfile' to it.
+	// If given a directory, error out, since `-f` does not support a path to a directory.
if dinfo.Mode().IsDir() {
- for _, file := range []string{"Containerfile", "Dockerfile"} {
- f := filepath.Join(dfile, file)
- logger.Debugf("reading local %q", f)
- contents, err = os.Open(f)
- if err == nil {
- break
- }
- }
- } else {
- contents, err = os.Open(dfile)
+ return "", nil, fmt.Errorf("containerfile: %q cannot be path to a directory", dfile)
}
-
+ contents, err = os.Open(dfile)
if err != nil {
return "", nil, err
}
dinfo, err = contents.Stat()
if err != nil {
contents.Close()
- return "", nil, errors.Wrapf(err, "error reading info about %q", dfile)
+ return "", nil, fmt.Errorf("error reading info about %q: %w", dfile, err)
}
if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
contents.Close()
- return "", nil, errors.Errorf("no contents in %q", dfile)
+ return "", nil, fmt.Errorf("no contents in %q", dfile)
}
data = contents
}
@@ -258,7 +249,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
logFile := platformOptions.LogFile + "_" + platformOptions.OS + "_" + platformOptions.Architecture
f, err := os.OpenFile(logFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
if err != nil {
- return errors.Wrapf(err, "opening logfile: %q", logFile)
+ return fmt.Errorf("opening logfile: %q: %w", logFile, err)
}
defer f.Close()
loggerPerPlatform = logrus.New()
@@ -302,7 +293,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
// partially-populated state at any point if we're creating it
// fresh.
list, err := rt.LookupManifestList(manifestList)
- if err != nil && errors.Cause(err) == storage.ErrImageUnknown {
+ if err != nil && errors.Is(err, storage.ErrImageUnknown) {
list, err = rt.CreateManifestList(manifestList)
}
if err != nil {
@@ -363,7 +354,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logrus.Logger, logPrefix string, options define.BuildOptions, dockerfiles []string, dockerfilecontents [][]byte) (string, reference.Canonical, error) {
mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0]))
if err != nil {
- return "", nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfiles[0])
+ return "", nil, fmt.Errorf("error parsing main Dockerfile: %s: %w", dockerfiles[0], err)
}
warnOnUnsetBuildArgs(logger, mainNode, options.Args)
@@ -405,7 +396,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
for i, d := range dockerfilecontents[1:] {
additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
if err != nil {
- return "", nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", dockerfiles[i])
+ return "", nil, fmt.Errorf("error parsing additional Dockerfile %s: %w", dockerfiles[i], err)
}
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
}
@@ -431,7 +422,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
labelLine = fmt.Sprintf("LABEL %q=%q\n", key, value)
additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader(labelLine))
if err != nil {
- return "", nil, errors.Wrapf(err, "error while adding additional LABEL steps")
+ return "", nil, fmt.Errorf("error while adding additional LABEL steps: %w", err)
}
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
}
@@ -440,22 +431,22 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
exec, err := newExecutor(logger, logPrefix, store, options, mainNode)
if err != nil {
- return "", nil, errors.Wrapf(err, "error creating build executor")
+ return "", nil, fmt.Errorf("error creating build executor: %w", err)
}
b := imagebuilder.NewBuilder(options.Args)
defaultContainerConfig, err := config.Default()
if err != nil {
- return "", nil, errors.Wrapf(err, "failed to get container config")
+ return "", nil, fmt.Errorf("failed to get container config: %w", err)
}
b.Env = append(defaultContainerConfig.GetDefaultEnv(), b.Env...)
stages, err := imagebuilder.NewStages(mainNode, b)
if err != nil {
- return "", nil, errors.Wrap(err, "error reading multiple stages")
+ return "", nil, fmt.Errorf("error reading multiple stages: %w", err)
}
if options.Target != "" {
stagesTargeted, ok := stages.ThroughTarget(options.Target)
if !ok {
- return "", nil, errors.Errorf("The target %q was not found in the provided Dockerfile", options.Target)
+ return "", nil, fmt.Errorf("The target %q was not found in the provided Dockerfile", options.Target)
}
stages = stagesTargeted
}
@@ -506,7 +497,7 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string
if flags, ok := os.LookupEnv("BUILDAH_CPPFLAGS"); ok {
args, err := shellwords.Parse(flags)
if err != nil {
- return nil, errors.Errorf("error parsing BUILDAH_CPPFLAGS %q: %v", flags, err)
+ return nil, fmt.Errorf("error parsing BUILDAH_CPPFLAGS %q: %v", flags, err)
}
cppArgs = append(cppArgs, args...)
}
@@ -517,14 +508,14 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string
cmd.Stderr = &stderrBuffer
if err = cmd.Start(); err != nil {
- return nil, errors.Wrapf(err, "preprocessing %s", containerfile)
+ return nil, fmt.Errorf("preprocessing %s: %w", containerfile, err)
}
if err = cmd.Wait(); err != nil {
if stderrBuffer.Len() != 0 {
logger.Warnf("Ignoring %s\n", stderrBuffer.String())
}
if stdoutBuffer.Len() == 0 {
- return nil, errors.Wrapf(err, "error preprocessing %s: preprocessor produced no output", containerfile)
+ return nil, fmt.Errorf("error preprocessing %s: preprocessor produced no output: %w", containerfile, err)
}
}
return &stdoutBuffer, nil
@@ -536,18 +527,18 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string
func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfilepaths []string, dockerfiles [][]byte, from string, args map[string]string, additionalBuildContext map[string]*define.AdditionalBuildContext, systemContext *types.SystemContext) ([]struct{ OS, Arch, Variant string }, error) {
baseImages, err := baseImages(dockerfilepaths, dockerfiles, from, args, additionalBuildContext)
if err != nil {
- return nil, errors.Wrapf(err, "determining list of base images")
+ return nil, fmt.Errorf("determining list of base images: %w", err)
}
logrus.Debugf("unresolved base images: %v", baseImages)
if len(baseImages) == 0 {
- return nil, errors.Wrapf(err, "build uses no non-scratch base images")
+ return nil, fmt.Errorf("build uses no non-scratch base images: %w", err)
}
targetPlatforms := make(map[string]struct{})
var platformList []struct{ OS, Arch, Variant string }
for baseImageIndex, baseImage := range baseImages {
resolved, err := shortnames.Resolve(systemContext, baseImage)
if err != nil {
- return nil, errors.Wrapf(err, "resolving image name %q", baseImage)
+ return nil, fmt.Errorf("resolving image name %q: %w", baseImage, err)
}
var manifestBytes []byte
var manifestType string
@@ -582,27 +573,27 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi
}
if len(manifestBytes) == 0 {
if len(resolved.PullCandidates) > 0 {
- return nil, errors.Errorf("base image name %q didn't resolve to a manifest list", baseImage)
+ return nil, fmt.Errorf("base image name %q didn't resolve to a manifest list", baseImage)
}
- return nil, errors.Errorf("base image name %q didn't resolve to anything", baseImage)
+ return nil, fmt.Errorf("base image name %q didn't resolve to anything", baseImage)
}
if manifestType != v1.MediaTypeImageIndex {
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
- return nil, errors.Wrapf(err, "parsing manifest list from base image %q", baseImage)
+ return nil, fmt.Errorf("parsing manifest list from base image %q: %w", baseImage, err)
}
list, err = list.ConvertToMIMEType(v1.MediaTypeImageIndex)
if err != nil {
- return nil, errors.Wrapf(err, "converting manifest list from base image %q to v2s2 list", baseImage)
+ return nil, fmt.Errorf("converting manifest list from base image %q to v2s2 list: %w", baseImage, err)
}
manifestBytes, err = list.Serialize()
if err != nil {
- return nil, errors.Wrapf(err, "encoding converted v2s2 manifest list for base image %q", baseImage)
+ return nil, fmt.Errorf("encoding converted v2s2 manifest list for base image %q: %w", baseImage, err)
}
}
index, err := manifest.OCI1IndexFromManifest(manifestBytes)
if err != nil {
- return nil, errors.Wrapf(err, "decoding manifest list for base image %q", baseImage)
+ return nil, fmt.Errorf("decoding manifest list for base image %q: %w", baseImage, err)
}
if baseImageIndex == 0 {
// populate the list with the first image's normalized platforms
@@ -641,7 +632,7 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi
for platform := range targetPlatforms {
platform, err := platforms.Parse(platform)
if err != nil {
- return nil, errors.Wrapf(err, "parsing platform double/triple %q", platform)
+ return nil, fmt.Errorf("parsing platform double/triple %q: %w", platform, err)
}
platformList = append(platformList, struct{ OS, Arch, Variant string }{
OS: platform.OS,
@@ -665,13 +656,13 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi
func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string, additionalBuildContext map[string]*define.AdditionalBuildContext) ([]string, error) {
mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0]))
if err != nil {
- return nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfilenames[0])
+ return nil, fmt.Errorf("error parsing main Dockerfile: %s: %w", dockerfilenames[0], err)
}
for i, d := range dockerfilecontents[1:] {
additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
if err != nil {
- return nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", dockerfilenames[i])
+ return nil, fmt.Errorf("error parsing additional Dockerfile %s: %w", dockerfilenames[i], err)
}
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
}
@@ -679,12 +670,12 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri
b := imagebuilder.NewBuilder(args)
defaultContainerConfig, err := config.Default()
if err != nil {
- return nil, errors.Wrapf(err, "failed to get container config")
+ return nil, fmt.Errorf("failed to get container config: %w", err)
}
b.Env = defaultContainerConfig.GetDefaultEnv()
stages, err := imagebuilder.NewStages(mainNode, b)
if err != nil {
- return nil, errors.Wrap(err, "error reading multiple stages")
+ return nil, fmt.Errorf("error reading multiple stages: %w", err)
}
var baseImages []string
nicknames := make(map[string]bool)
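In platformsForBaseImages above, computing the target platforms requires an OCI image index for every base image, so a Docker-format manifest list is converted before its platform entries are read. A condensed sketch of that normalization step, reusing only the containers/image calls that appear in the hunk (the package name and error text here are illustrative):

package imageutil

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// normalizeToOCIIndex mirrors the conversion in platformsForBaseImages: a
// manifest that is not already an OCI image index is parsed as a generic
// list, converted, and re-serialized before being decoded as an OCI1Index.
func normalizeToOCIIndex(manifestBytes []byte, manifestType string) (*manifest.OCI1Index, error) {
	if manifestType != v1.MediaTypeImageIndex {
		list, err := manifest.ListFromBlob(manifestBytes, manifestType)
		if err != nil {
			return nil, fmt.Errorf("parsing manifest list: %w", err)
		}
		if list, err = list.ConvertToMIMEType(v1.MediaTypeImageIndex); err != nil {
			return nil, fmt.Errorf("converting manifest list: %w", err)
		}
		if manifestBytes, err = list.Serialize(); err != nil {
			return nil, fmt.Errorf("encoding converted manifest list: %w", err)
		}
	}
	return manifest.OCI1IndexFromManifest(manifestBytes)
}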
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index a33e1ffdd..c9e2493b3 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -2,6 +2,7 @@ package imagebuildah
import (
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -34,7 +35,6 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/openshift/imagebuilder"
"github.com/openshift/imagebuilder/dockerfile/parser"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/semaphore"
)
@@ -151,7 +151,7 @@ type imageTypeAndHistoryAndDiffIDs struct {
func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, options define.BuildOptions, mainNode *parser.Node) (*Executor, error) {
defaultContainerConfig, err := config.Default()
if err != nil {
- return nil, errors.Wrapf(err, "failed to get container config")
+ return nil, fmt.Errorf("failed to get container config: %w", err)
}
excludes := options.Excludes
@@ -396,7 +396,7 @@ func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebu
b.stagesSemaphore.Release(1)
time.Sleep(time.Millisecond * 10)
if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
- return true, errors.Wrapf(err, "error reacquiring job semaphore")
+ return true, fmt.Errorf("error reacquiring job semaphore: %w", err)
}
}
}
@@ -411,20 +411,20 @@ func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID
}
imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID)
if err != nil {
- return "", nil, nil, errors.Wrapf(err, "error getting image reference %q", imageID)
+ return "", nil, nil, fmt.Errorf("error getting image reference %q: %w", imageID, err)
}
ref, err := imageRef.NewImage(ctx, nil)
if err != nil {
- return "", nil, nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
+ return "", nil, nil, fmt.Errorf("error creating new image from reference to image %q: %w", imageID, err)
}
defer ref.Close()
oci, err := ref.OCIConfig(ctx)
if err != nil {
- return "", nil, nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
+ return "", nil, nil, fmt.Errorf("error getting possibly-converted OCI config of image %q: %w", imageID, err)
}
manifestBytes, manifestFormat, err := ref.Manifest(ctx)
if err != nil {
- return "", nil, nil, errors.Wrapf(err, "error getting manifest of image %q", imageID)
+ return "", nil, nil, fmt.Errorf("error getting manifest of image %q: %w", imageID, err)
}
if manifestFormat == "" && len(manifestBytes) > 0 {
manifestFormat = manifest.GuessMIMEType(manifestBytes)
@@ -510,6 +510,25 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
return imageID, ref, nil
}
+type stageDependencyInfo struct {
+ Name string
+ Position int
+ Needs []string
+ NeededByTarget bool
+}
+
+// markDependencyStagesForTarget marks `NeededByTarget` as true for the given stage and, recursively, for all of its dependency stages.
+func markDependencyStagesForTarget(dependencyMap map[string]*stageDependencyInfo, stage string) {
+ if stageDependencyInfo, ok := dependencyMap[stage]; ok {
+ if !stageDependencyInfo.NeededByTarget {
+ stageDependencyInfo.NeededByTarget = true
+ for _, need := range stageDependencyInfo.Needs {
+ markDependencyStagesForTarget(dependencyMap, need)
+ }
+ }
+ }
+}
+
// Build takes care of the details of running Prepare/Execute/Commit/Delete
// over each of the one or more parsed Dockerfiles and stages.
func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (imageID string, ref reference.Canonical, err error) {
@@ -566,7 +585,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
}
if _, err := b.store.DeleteImage(removeID, true); err != nil {
logrus.Debugf("failed to remove intermediate image %q: %v", removeID, err)
- if b.forceRmIntermediateCtrs || errors.Cause(err) != storage.ErrImageUsedByContainer {
+ if b.forceRmIntermediateCtrs || !errors.Is(err, storage.ErrImageUsedByContainer) {
lastErr = err
}
}
@@ -588,15 +607,20 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
if err == nil {
err = cleanupErr
} else {
- err = errors.Wrap(err, cleanupErr.Error())
+ err = fmt.Errorf("%v: %w", cleanupErr.Error(), err)
}
}
}()
+	// dependencyMap holds a stageDependencyInfo for each stage; it is
+	// used later to mark whether a particular stage is needed by the
+	// target stage or not.
+ dependencyMap := make(map[string]*stageDependencyInfo)
// Build maps of every named base image and every referenced stage root
// filesystem. Individual stages can use them to determine whether or
// not they can skip certain steps near the end of their stages.
for stageIndex, stage := range stages {
+ dependencyMap[stage.Name] = &stageDependencyInfo{Name: stage.Name, Position: stage.Position}
node := stage.Node // first line
for node != nil { // each line
for _, child := range node.Children { // tokens on this line, though we only care about the first
@@ -620,10 +644,20 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
userArgs := argsMapToSlice(stage.Builder.Args)
baseWithArg, err := imagebuilder.ProcessWord(base, userArgs)
if err != nil {
- return "", nil, errors.Wrapf(err, "while replacing arg variables with values for format %q", base)
+ return "", nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", base, err)
}
b.baseMap[baseWithArg] = true
logrus.Debugf("base for stage %d: %q", stageIndex, base)
+					// If the selected base is not an additional build
+					// context and is a valid stage, add it to the
+					// current stage's dependency tree.
+ if _, ok := b.additionalBuildContexts[baseWithArg]; !ok {
+ if _, ok := dependencyMap[baseWithArg]; ok {
+ // update current stage's dependency info
+ currentStageInfo := dependencyMap[stage.Name]
+ currentStageInfo.Needs = append(currentStageInfo.Needs, baseWithArg)
+ }
+ }
}
}
case "ADD", "COPY":
@@ -636,12 +670,68 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
rootfs := strings.TrimPrefix(flag, "--from=")
b.rootfsMap[rootfs] = true
logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs)
+					// Populate the dependency tree and check
+					// whether this ADD or COPY pulls content
+					// from another stage.
+ stageName := rootfs
+ // If --from=<index> convert index to name
+ if index, err := strconv.Atoi(stageName); err == nil {
+ stageName = stages[index].Name
+ }
+					// If the selected base is not an additional build
+					// context and is a valid stage, add it to the
+					// current stage's dependency tree.
+ if _, ok := b.additionalBuildContexts[stageName]; !ok {
+ if _, ok := dependencyMap[stageName]; ok {
+ // update current stage's dependency info
+ currentStageInfo := dependencyMap[stage.Name]
+ currentStageInfo.Needs = append(currentStageInfo.Needs, stageName)
+ }
+ }
+ }
+ }
+ case "RUN":
+ for _, flag := range child.Flags { // flags for this instruction
+					// We need to populate the stage dependency tree
+					// when the instruction uses `--mount` with a `from=`
+					// field; if `from=` points to a stage, include it in
+					// the dependency calculation.
+ if strings.HasPrefix(flag, "--mount=") && strings.Contains(flag, "from") {
+ mountFlags := strings.TrimPrefix(flag, "--mount=")
+ fields := strings.Split(mountFlags, ",")
+ for _, field := range fields {
+ if strings.HasPrefix(field, "from=") {
+ fromField := strings.SplitN(field, "=", 2)
+ if len(fromField) > 1 {
+ mountFrom := fromField[1]
+									// If this source is a stage, add it to the current
+									// stage's dependency tree, but first confirm that it
+									// is not an additional build context.
+ if _, ok := b.additionalBuildContexts[mountFrom]; !ok {
+ if _, ok := dependencyMap[mountFrom]; ok {
+ // update current stage's dependency info
+ currentStageInfo := dependencyMap[stage.Name]
+ currentStageInfo.Needs = append(currentStageInfo.Needs, mountFrom)
+ }
+ }
+ } else {
+ return "", nil, fmt.Errorf("invalid value for field `from=`: %q", fromField[1])
+ }
+ }
+ }
}
}
}
}
node = node.Next // next line
}
+		// The last stage is always the target stage.
+		// Once the last/target stage has been processed,
+		// walk the dependency map so that stages which
+		// are not needed by the target can be skipped.
+ if stage.Position == (len(stages) - 1) {
+ markDependencyStagesForTarget(dependencyMap, stage.Name)
+ }
}
type Result struct {
@@ -685,9 +775,9 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
if cancel || cleanupStages == nil {
var err error
if stages[index].Name != strconv.Itoa(index) {
- err = errors.Errorf("not building stage %d: build canceled", index)
+ err = fmt.Errorf("not building stage %d: build canceled", index)
} else {
- err = errors.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name)
+ err = fmt.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name)
}
ch <- Result{
Index: index,
@@ -695,6 +785,18 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
}
return
}
+			// Skip this stage if it is not needed by the target
+			// stage or by any of the target's dependency stages.
+ if stageDependencyInfo, ok := dependencyMap[stages[index].Name]; ok {
+ if !stageDependencyInfo.NeededByTarget {
+ logrus.Debugf("Skipping stage with Name %q and index %d since its not needed by the target stage", stages[index].Name, index)
+ ch <- Result{
+ Index: index,
+ Error: nil,
+ }
+ return
+ }
+ }
stageID, stageRef, stageErr := b.buildStage(ctx, cleanupStages, stages, index)
if stageErr != nil {
cancel = true
@@ -765,18 +867,18 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
case is.Transport.Name():
img, err := is.Transport.GetStoreImage(b.store, dest)
if err != nil {
- return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
+ return imageID, ref, fmt.Errorf("error locating just-written image %q: %w", transports.ImageName(dest), err)
}
if len(b.additionalTags) > 0 {
if err = util.AddImageNames(b.store, "", b.systemContext, img, b.additionalTags); err != nil {
- return imageID, ref, errors.Wrapf(err, "error setting image names to %v", append(img.Names, b.additionalTags...))
+ return imageID, ref, fmt.Errorf("error setting image names to %v: %w", append(img.Names, b.additionalTags...), err)
}
logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
}
// Report back the caller the tags applied, if any.
img, err = is.Transport.GetStoreImage(b.store, dest)
if err != nil {
- return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
+ return imageID, ref, fmt.Errorf("error locating just-written image %q: %w", transports.ImageName(dest), err)
}
for _, name := range img.Names {
fmt.Fprintf(b.out, "Successfully tagged %s\n", name)
@@ -795,11 +897,11 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
logrus.Debugf("printing final image id %q", imageID)
if b.iidfile != "" {
if err = ioutil.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil {
- return imageID, ref, errors.Wrapf(err, "failed to write image ID to file %q", b.iidfile)
+ return imageID, ref, fmt.Errorf("failed to write image ID to file %q: %w", b.iidfile, err)
}
} else {
if _, err := stdout.Write([]byte(imageID + "\n")); err != nil {
- return imageID, ref, errors.Wrapf(err, "failed to write image ID to stdout")
+ return imageID, ref, fmt.Errorf("failed to write image ID to stdout: %w", err)
}
}
return imageID, ref, nil
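The dependency bookkeeping added to executor.go reduces to a small reachability walk: each stage records the stages it consumes (via FROM, COPY --from=, or RUN --mount=...,from=...), and the target stage's mark propagates backwards through those Needs edges so that unreachable stages are skipped. A runnable sketch of just that walk, on a hypothetical three-stage build where the "docs" stage is never referenced by the target:

package main

import "fmt"

type stageDependencyInfo struct {
	Name           string
	Needs          []string
	NeededByTarget bool
}

// markDependencyStagesForTarget marks the named stage and then recurses into
// everything it depends on; stages that are already marked stop the recursion.
func markDependencyStagesForTarget(deps map[string]*stageDependencyInfo, stage string) {
	if info, ok := deps[stage]; ok && !info.NeededByTarget {
		info.NeededByTarget = true
		for _, need := range info.Needs {
			markDependencyStagesForTarget(deps, need)
		}
	}
}

func main() {
	// Hypothetical build: "final" copies from "builder"; "docs" is unused.
	deps := map[string]*stageDependencyInfo{
		"builder": {Name: "builder"},
		"docs":    {Name: "docs"},
		"final":   {Name: "final", Needs: []string{"builder"}},
	}
	markDependencyStagesForTarget(deps, "final")
	for _, name := range []string{"builder", "docs", "final"} {
		fmt.Printf("%s needed by target: %v\n", name, deps[name].NeededByTarget)
	}
	// Prints: builder true, docs false, final true.
}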
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index 576ae5ed9..d21757f4b 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -36,7 +36,6 @@ import (
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/openshift/imagebuilder"
"github.com/openshift/imagebuilder/dockerfile/parser"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -90,10 +89,10 @@ func (s *StageExecutor) Preserve(path string) error {
// except ensure that it exists.
createdDirPerms := os.FileMode(0755)
if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
- return errors.Wrapf(err, "error ensuring volume path exists")
+ return fmt.Errorf("error ensuring volume path exists: %w", err)
}
if err := s.volumeCacheInvalidate(path); err != nil {
- return errors.Wrapf(err, "error ensuring volume path %q is preserved", filepath.Join(s.mountPoint, path))
+ return fmt.Errorf("error ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
}
return nil
}
@@ -101,7 +100,7 @@ func (s *StageExecutor) Preserve(path string) error {
s.preserved++
cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
if err != nil {
- return errors.Errorf("unable to locate temporary directory for container")
+ return fmt.Errorf("unable to locate temporary directory for container")
}
cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
// Save info about the top level of the location that we'll be archiving.
@@ -112,22 +111,22 @@ func (s *StageExecutor) Preserve(path string) error {
if evaluated, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, path), copier.EvalOptions{}); err == nil {
symLink, err := filepath.Rel(s.mountPoint, evaluated)
if err != nil {
- return errors.Wrapf(err, "making evaluated path %q relative to %q", evaluated, s.mountPoint)
+ return fmt.Errorf("making evaluated path %q relative to %q: %w", evaluated, s.mountPoint, err)
}
if strings.HasPrefix(symLink, ".."+string(os.PathSeparator)) {
- return errors.Errorf("evaluated path %q was not below %q", evaluated, s.mountPoint)
+ return fmt.Errorf("evaluated path %q was not below %q", evaluated, s.mountPoint)
}
archivedPath = evaluated
path = string(os.PathSeparator) + symLink
} else {
- return errors.Wrapf(err, "error evaluating path %q", path)
+ return fmt.Errorf("error evaluating path %q: %w", path, err)
}
st, err := os.Stat(archivedPath)
if os.IsNotExist(err) {
createdDirPerms := os.FileMode(0755)
if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
- return errors.Wrapf(err, "error ensuring volume path exists")
+ return fmt.Errorf("error ensuring volume path exists: %w", err)
}
st, err = os.Stat(archivedPath)
}
@@ -139,7 +138,7 @@ func (s *StageExecutor) Preserve(path string) error {
if !s.volumes.Add(path) {
// This path is not a subdirectory of a volume path that we're
// already preserving, so adding it to the list should work.
- return errors.Errorf("error adding %q to the volume cache", path)
+ return fmt.Errorf("error adding %q to the volume cache", path)
}
s.volumeCache[path] = cacheFile
// Now prune cache files for volumes that are now supplanted by this one.
@@ -204,14 +203,14 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
for cachedPath, cacheFile := range s.volumeCache {
archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
if err != nil {
- return nil, errors.Wrapf(err, "error evaluating volume path")
+ return nil, fmt.Errorf("error evaluating volume path: %w", err)
}
relativePath, err := filepath.Rel(s.mountPoint, archivedPath)
if err != nil {
- return nil, errors.Wrapf(err, "error converting %q into a path relative to %q", archivedPath, s.mountPoint)
+ return nil, fmt.Errorf("error converting %q into a path relative to %q: %w", archivedPath, s.mountPoint, err)
}
if strings.HasPrefix(relativePath, ".."+string(os.PathSeparator)) {
- return nil, errors.Errorf("error converting %q into a path relative to %q", archivedPath, s.mountPoint)
+ return nil, fmt.Errorf("error converting %q into a path relative to %q", archivedPath, s.mountPoint)
}
_, err = os.Stat(cacheFile)
if err == nil {
@@ -223,7 +222,7 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
}
createdDirPerms := os.FileMode(0755)
if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
- return nil, errors.Wrapf(err, "error ensuring volume path exists")
+ return nil, fmt.Errorf("error ensuring volume path exists: %w", err)
}
logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
cache, err := os.Create(cacheFile)
@@ -233,12 +232,12 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
defer cache.Close()
rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint)
if err != nil {
- return nil, errors.Wrapf(err, "error archiving %q", archivedPath)
+ return nil, fmt.Errorf("error archiving %q: %w", archivedPath, err)
}
defer rc.Close()
_, err = io.Copy(cache, rc)
if err != nil {
- return nil, errors.Wrapf(err, "error archiving %q to %q", archivedPath, cacheFile)
+ return nil, fmt.Errorf("error archiving %q to %q: %w", archivedPath, cacheFile, err)
}
mount := specs.Mount{
Source: archivedPath,
@@ -256,7 +255,7 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
for cachedPath, cacheFile := range s.volumeCache {
archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
if err != nil {
- return errors.Wrapf(err, "error evaluating volume path")
+ return fmt.Errorf("error evaluating volume path: %w", err)
}
logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
cache, err := os.Open(cacheFile)
@@ -273,7 +272,7 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
}
err = chrootarchive.Untar(cache, archivedPath, nil)
if err != nil {
- return errors.Wrapf(err, "error extracting archive at %q", archivedPath)
+ return fmt.Errorf("error extracting archive at %q: %w", archivedPath, err)
}
if st, ok := s.volumeCacheInfo[cachedPath]; ok {
if err := os.Chmod(archivedPath, st.Mode()); err != nil {
@@ -302,7 +301,7 @@ func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err erro
for cachedPath := range s.volumeCache {
err = copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.MkdirOptions{})
if err != nil {
- return nil, errors.Wrapf(err, "ensuring volume exists")
+ return nil, fmt.Errorf("ensuring volume exists: %w", err)
}
volumePath := filepath.Join(s.mountPoint, cachedPath)
mount := specs.Mount{
@@ -367,7 +366,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
// value. Otherwise just return the value found.
from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments())
if fromErr != nil {
- return errors.Wrapf(fromErr, "unable to resolve argument %q", copy.From)
+ return fmt.Errorf("unable to resolve argument %q: %w", copy.From, fromErr)
}
var additionalBuildContext *define.AdditionalBuildContext
if foundContext, ok := s.executor.additionalBuildContexts[from]; ok {
@@ -378,12 +377,11 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
// exists and if stage short_name matches any
// additionalContext replace stage with additional
// build context.
- if _, err := strconv.Atoi(from); err == nil {
- if stage, ok := s.executor.stages[from]; ok {
- if foundContext, ok := s.executor.additionalBuildContexts[stage.name]; ok {
- additionalBuildContext = foundContext
- }
- }
+ if index, err := strconv.Atoi(from); err == nil {
+ from = s.stages[index].Name
+ }
+ if foundContext, ok := s.executor.additionalBuildContexts[from]; ok {
+ additionalBuildContext = foundContext
}
}
if additionalBuildContext != nil {
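The replacement above flattens the old nested lookup: a numeric `from` is first converted to the stage's name, and only then are the additional build contexts consulted. A hedged standalone sketch of that resolution, with a bounds check added here purely for illustration (`stage` and the map stand in for imagebuilder.Stages and the executor's context table):

    package main

    import (
        "fmt"
        "strconv"
    )

    type stage struct{ Name string }

    // resolveFrom converts a numeric --from=<index> to the stage name, then
    // lets an additional build context with that name take priority.
    func resolveFrom(from string, stages []stage, contexts map[string]string) string {
        if index, err := strconv.Atoi(from); err == nil && index >= 0 && index < len(stages) {
            from = stages[index].Name
        }
        if value, ok := contexts[from]; ok {
            return value
        }
        return from
    }

    func main() {
        stages := []stage{{Name: "builder"}, {Name: "runtime"}}
        contexts := map[string]string{"builder": "docker.io/library/golang:1.18"}
        fmt.Println(resolveFrom("0", stages, contexts)) // docker.io/library/golang:1.18
    }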
@@ -400,7 +398,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
// temp and point context to that.
path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
if err != nil {
- return errors.Wrapf(err, "unable to download context from external source %q", additionalBuildContext.Value)
+ return fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err)
}
// point context dir to the extracted path
contextDir = filepath.Join(path, subdir)
@@ -425,7 +423,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
contextDir = builder.MountPoint
idMappingOptions = &builder.IDMappingOptions
} else {
- return errors.Errorf("the stage %q has not been built", copy.From)
+ return fmt.Errorf("the stage %q has not been built", copy.From)
}
} else if additionalBuildContext.IsImage {
// Image was selected as additionalContext so only process image.
@@ -450,7 +448,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
sources = append(sources, src)
} else {
// returns an error to be compatible with docker
- return errors.Errorf("source can't be a URL for COPY")
+ return fmt.Errorf("source can't be a URL for COPY")
}
} else {
sources = append(sources, filepath.Join(contextDir, src))
@@ -484,7 +482,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
if strings.Contains(flag, "from") {
arr := strings.SplitN(flag, ",", 2)
if len(arr) < 2 {
- return nil, errors.Errorf("Invalid --mount command: %s", flag)
+ return nil, fmt.Errorf("Invalid --mount command: %s", flag)
}
tokens := strings.Split(arr[1], ",")
for _, val := range tokens {
@@ -492,14 +490,14 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
switch kv[0] {
case "from":
if len(kv) == 1 {
- return nil, errors.Errorf("unable to resolve argument for `from=`: bad argument")
+ return nil, fmt.Errorf("unable to resolve argument for `from=`: bad argument")
}
if kv[1] == "" {
- return nil, errors.Errorf("unable to resolve argument for `from=`: from points to an empty value")
+ return nil, fmt.Errorf("unable to resolve argument for `from=`: from points to an empty value")
}
from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments())
if fromErr != nil {
- return nil, errors.Wrapf(fromErr, "unable to resolve argument %q", kv[1])
+ return nil, fmt.Errorf("unable to resolve argument %q: %w", kv[1], fromErr)
}
// If additional buildContext contains this
// give priority to that and break if additional
@@ -508,7 +506,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
if additionalBuildContext.IsImage {
mountPoint, err := s.getImageRootfs(s.ctx, additionalBuildContext.Value)
if err != nil {
- return nil, errors.Errorf("%s from=%s: image found with that name", flag, from)
+ return nil, fmt.Errorf("%s from=%s: image found with that name", flag, from)
}
// The `from` in stageMountPoints should point
// to `mountPoint` replaced from additional
@@ -536,7 +534,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
// temp and point context to that.
path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
if err != nil {
- return nil, errors.Wrapf(err, "unable to download context from external source %q", additionalBuildContext.Value)
+ return nil, fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err)
}
// point context dir to the extracted path
mountPoint = filepath.Join(path, subdir)
@@ -562,7 +560,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
} else {
mountPoint, err := s.getImageRootfs(s.ctx, from)
if err != nil {
- return nil, errors.Errorf("%s from=%s: no stage or image found with that name", flag, from)
+ return nil, fmt.Errorf("%s from=%s: no stage or image found with that name", flag, from)
}
stageMountPoints[from] = internal.StageMountDetails{IsStage: false, MountPoint: mountPoint}
break
@@ -585,13 +583,13 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
return err
}
if s.builder == nil {
- return errors.Errorf("no build container available")
+ return fmt.Errorf("no build container available")
}
stdin := s.executor.in
if stdin == nil {
devNull, err := os.Open(os.DevNull)
if err != nil {
- return errors.Errorf("error opening %q for reading: %v", os.DevNull, err)
+ return fmt.Errorf("error opening %q for reading: %v", os.DevNull, err)
}
defer devNull.Close()
stdin = devNull
@@ -669,7 +667,7 @@ func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
s.executor.logger.Errorf("+(UNHANDLED LOGLEVEL) %#v", step)
}
- return errors.Errorf(err)
+ return fmt.Errorf("%s", err)
}
// prepare creates a working container based on the specified image, or if one
@@ -684,7 +682,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
base, err := ib.From(node)
if err != nil {
logrus.Debugf("prepare(node.Children=%#v)", node.Children)
- return nil, errors.Wrapf(err, "error determining starting point for build")
+ return nil, fmt.Errorf("error determining starting point for build: %w", err)
}
from = base
}
@@ -710,7 +708,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
if stage.Builder.Platform != "" {
os, arch, variant, err := parse.Platform(stage.Builder.Platform)
if err != nil {
- return nil, errors.Wrapf(err, "unable to parse platform %q", stage.Builder.Platform)
+ return nil, fmt.Errorf("unable to parse platform %q: %w", stage.Builder.Platform, err)
}
if arch != "" || variant != "" {
builderSystemContext.ArchitectureChoice = arch
@@ -753,7 +751,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
if err != nil {
- return nil, errors.Wrapf(err, "error creating build container")
+ return nil, fmt.Errorf("error creating build container: %w", err)
}
// If executor's ProcessLabel and MountLabel is empty means this is the first stage
@@ -815,7 +813,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
if err2 := builder.Delete(); err2 != nil {
logrus.Debugf("error deleting container which we failed to update: %v", err2)
}
- return nil, errors.Wrapf(err, "error updating build context")
+ return nil, fmt.Errorf("error updating build context: %w", err)
}
}
mountPoint, err := builder.Mount(builder.MountLabel)
@@ -823,7 +821,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
if err2 := builder.Delete(); err2 != nil {
logrus.Debugf("error deleting container which we failed to mount: %v", err2)
}
- return nil, errors.Wrapf(err, "error mounting new container")
+ return nil, fmt.Errorf("error mounting new container: %w", err)
}
if rebase {
// Make this our "current" working container.
@@ -970,7 +968,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// the case, we need to commit() to create a new image.
logCommit(s.output, -1)
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output, s.executor.squash); err != nil {
- return "", nil, errors.Wrapf(err, "error committing base container")
+ return "", nil, fmt.Errorf("error committing base container: %w", err)
}
} else if len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 {
// The image would be modified by the labels passed
@@ -987,6 +985,22 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil {
return "", nil, err
}
+ if s.executor.buildOutput != "" && lastStage {
+ // If we have reached this point, then our build is just performing a tag
+ // and contains no steps or instructions (i.e. the Containerfile only
+ // contains `FROM <imagename>` and nothing else), so we will never end up
+ // committing this but will instead just re-tag the image. For such
+ // use-cases, if `-o` or `--output` was specified, honor it and export the
+ // contents of the current build anyway.
+ logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput)
+ buildOutputOption, err := parse.GetBuildOutput(s.executor.buildOutput)
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to parse build output: %w", err)
+ }
+ if err := s.generateBuildOutput(buildah.CommitOptions{}, buildOutputOption); err != nil {
+ return "", nil, err
+ }
+ }
+
}
logImageID(imgID)
}
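This new last-stage branch makes a Containerfile that is nothing but `FROM <imagename>` (a pure re-tag that never commits) still honor `-o`/`--output`. The option string follows BuildKit's format, e.g. `type=local,dest=<dir>` or `type=tar,dest=<file>`. A toy stand-in for parse.GetBuildOutput, with assumed field names, to show the shape of what gets parsed:

    package main

    import (
        "fmt"
        "strings"
    )

    // buildOutput loosely mirrors define.BuildOutputOption; the field names
    // here are assumptions for illustration, not taken from this diff.
    type buildOutput struct {
        Path  string
        IsDir bool
    }

    // parseBuildOutput is a toy stand-in for parse.GetBuildOutput. It accepts
    // the BuildKit-style "type=local,dest=<dir>" or "type=tar,dest=<file>".
    func parseBuildOutput(spec string) (buildOutput, error) {
        var out buildOutput
        for _, kv := range strings.Split(spec, ",") {
            key, value, ok := strings.Cut(kv, "=")
            if !ok {
                return out, fmt.Errorf("invalid build output option %q", kv)
            }
            switch key {
            case "type":
                out.IsDir = value == "local"
            case "dest":
                out.Path = value
            default:
                return out, fmt.Errorf("unknown build output key %q", key)
            }
        }
        return out, nil
    }

    func main() {
        opt, err := parseBuildOutput("type=local,dest=out")
        fmt.Printf("%+v %v\n", opt, err) // {Path:out IsDir:true} <nil>
    }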
@@ -998,7 +1012,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// Resolve any arguments in this instruction.
step := ib.Step()
if err := step.Resolve(node); err != nil {
- return "", nil, errors.Wrapf(err, "error resolving step %+v", *node)
+ return "", nil, fmt.Errorf("error resolving step %+v: %w", *node, err)
}
logrus.Debugf("Parsed Step: %+v", *step)
if !s.executor.quiet {
@@ -1011,21 +1025,29 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
command := strings.ToUpper(step.Command)
// chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from='
if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from") {
- return "", nil, errors.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
+ return "", nil, fmt.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
}
if command == "ADD" && (flag == "--chmod" || flag == "--chown") {
- return "", nil, errors.Errorf("ADD only supports the --chmod=<permissions> and the --chown=<uid:gid> flags")
+ return "", nil, fmt.Errorf("ADD only supports the --chmod=<permissions> and the --chown=<uid:gid> flags")
}
if strings.Contains(flag, "--from") && command == "COPY" {
arr := strings.Split(flag, "=")
if len(arr) != 2 {
- return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
+ return "", nil, fmt.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
}
// If arr[1] has an argument within it, resolve it to its
// value. Otherwise just return the value found.
from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
if fromErr != nil {
- return "", nil, errors.Wrapf(fromErr, "unable to resolve argument %q", arr[1])
+ return "", nil, fmt.Errorf("unable to resolve argument %q: %w", arr[1], fromErr)
+ }
+
+ // Before looking into the additional build contexts,
+ // also account for the case where an index is given
+ // instead of a name, and convert the index in
+ // --from=<index> to a stage name.
+ if index, err := strconv.Atoi(from); err == nil {
+ from = s.stages[index].Name
}
// If additional buildContext contains this
// give priority to that and break if additional
@@ -1040,7 +1062,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// replace with image set in build context
from = additionalBuildContext.Value
if _, err := s.getImageRootfs(ctx, from); err != nil {
- return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from)
+ return "", nil, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from)
}
break
}
@@ -1055,7 +1077,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
break
} else if _, err = s.getImageRootfs(ctx, from); err != nil {
- return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from)
+ return "", nil, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from)
}
break
}
@@ -1075,8 +1097,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if !s.executor.layers {
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
- logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
- return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
+ logrus.Debugf("Error building at step %+v: %v", *step, err)
+ return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err)
}
// In case we added content, retrieve its digest.
addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
@@ -1109,7 +1131,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash)
if err != nil {
- return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
+ return "", nil, fmt.Errorf("error committing container for step %+v: %w", *step, err)
}
logImageID(imgID)
} else {
@@ -1144,7 +1166,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if checkForLayers && step.Command != "arg" && !(s.executor.squash && lastInstruction && lastStage) {
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
- return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
+ return "", nil, fmt.Errorf("error checking if cached image exists from a previous build: %w", err)
}
}
@@ -1155,8 +1177,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if cacheID == "" {
// Process the instruction directly.
if err = ib.Run(step, s, noRunsRemaining); err != nil {
- logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
- return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
+ logrus.Debugf("Error building at step %+v: %v", *step, err)
+ return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err)
}
// In case we added content, retrieve its digest.
@@ -1175,7 +1197,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if checkForLayers {
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
- return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
+ return "", nil, fmt.Errorf("error checking if cached image exists from a previous build: %w", err)
}
}
} else {
@@ -1188,8 +1210,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if !s.stepRequiresLayer(step) {
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
- logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
- return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
+ logrus.Debugf("Error building at step %+v: %v", *step, err)
+ return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err)
}
}
}
@@ -1222,7 +1244,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// can be part of build-cache.
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false)
if err != nil {
- return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
+ return "", nil, fmt.Errorf("error committing container for step %+v: %w", *step, err)
}
}
@@ -1232,7 +1254,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if s.executor.squash && lastInstruction && lastStage {
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true)
if err != nil {
- return "", nil, errors.Wrapf(err, "error committing final squash step %+v", *step)
+ return "", nil, fmt.Errorf("error committing final squash step %+v: %w", *step, err)
}
}
@@ -1261,7 +1283,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// ID that we really should not be pulling anymore (see
// containers/podman/issues/10307).
if _, err := s.prepare(ctx, imgID, false, true, define.PullNever); err != nil {
- return "", nil, errors.Wrap(err, "error preparing container for next step")
+ return "", nil, fmt.Errorf("error preparing container for next step: %w", err)
}
}
}
@@ -1465,7 +1487,7 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st
if err == nil {
err = destroyErr
} else {
- err = errors.Wrap(err, destroyErr.Error())
+ err = fmt.Errorf("%v: %w", destroyErr.Error(), err)
}
}
}()
@@ -1473,27 +1495,27 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st
// Look up the source image, expecting it to be in local storage
src, err := is.Transport.ParseStoreReference(s.executor.store, cacheID)
if err != nil {
- return "", nil, errors.Wrapf(err, "error getting source imageReference for %q", cacheID)
+ return "", nil, fmt.Errorf("error getting source imageReference for %q: %w", cacheID, err)
}
options := cp.Options{
RemoveSignatures: true, // more like "ignore signatures", since they don't get removed when src and dest are the same image
}
manifestBytes, err := cp.Image(ctx, policyContext, dest, src, &options)
if err != nil {
- return "", nil, errors.Wrapf(err, "error copying image %q", cacheID)
+ return "", nil, fmt.Errorf("error copying image %q: %w", cacheID, err)
}
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
- return "", nil, errors.Wrapf(err, "error computing digest of manifest for image %q", cacheID)
+ return "", nil, fmt.Errorf("error computing digest of manifest for image %q: %w", cacheID, err)
}
img, err := is.Transport.GetStoreImage(s.executor.store, dest)
if err != nil {
- return "", nil, errors.Wrapf(err, "error locating new copy of image %q (i.e., %q)", cacheID, transports.ImageName(dest))
+ return "", nil, fmt.Errorf("error locating new copy of image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err)
}
var ref reference.Canonical
if dref := dest.DockerReference(); dref != nil {
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
- return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q (i.e., %q)", cacheID, transports.ImageName(dest))
+ return "", nil, fmt.Errorf("error computing canonical reference for new image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err)
}
}
return img.ID, ref, nil
@@ -1505,14 +1527,14 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
// Get the list of images available in the image store
images, err := s.executor.store.Images()
if err != nil {
- return "", errors.Wrap(err, "error getting image list from store")
+ return "", fmt.Errorf("error getting image list from store: %w", err)
}
var baseHistory []v1.History
var baseDiffIDs []digest.Digest
if s.builder.FromImageID != "" {
_, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
if err != nil {
- return "", errors.Wrapf(err, "error getting history of base image %q", s.builder.FromImageID)
+ return "", fmt.Errorf("error getting history of base image %q: %w", s.builder.FromImageID, err)
}
}
for _, image := range images {
@@ -1521,7 +1543,7 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
if image.TopLayer != "" {
imageTopLayer, err = s.executor.store.Layer(image.TopLayer)
if err != nil {
- return "", errors.Wrapf(err, "error getting top layer info")
+ return "", fmt.Errorf("error getting top layer info: %w", err)
}
// Figure out which layer from this image we should
// compare our container's base layer to.
@@ -1574,7 +1596,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput)
buildOutputOption, err = parse.GetBuildOutput(s.executor.buildOutput)
if err != nil {
- return "", nil, errors.Wrapf(err, "failed to parse build output")
+ return "", nil, fmt.Errorf("failed to parse build output: %w", err)
}
}
var imageRef types.ImageReference
@@ -1719,36 +1741,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
}
// generate build output
if s.executor.buildOutput != "" {
- extractRootfsOpts := buildah.ExtractRootfsOptions{}
- if unshare.IsRootless() {
- // In order to maintain as much parity as possible
- // with buildkit's version of --output and to avoid
- // unsafe invocation of exported executables it was
- // decided to strip setuid,setgid and extended attributes.
- // Since modes like setuid,setgid leaves room for executable
- // to get invoked with different file-system permission its safer
- // to strip them off for unpriviledged invocation.
- // See: https://github.com/containers/buildah/pull/3823#discussion_r829376633
- extractRootfsOpts.StripSetuidBit = true
- extractRootfsOpts.StripSetgidBit = true
- extractRootfsOpts.StripXattrs = true
- }
- rc, errChan, err := s.builder.ExtractRootfs(options, extractRootfsOpts)
- if err != nil {
- return "", nil, errors.Wrapf(err, "failed to extract rootfs from given container image")
- }
- defer rc.Close()
- err = internalUtil.ExportFromReader(rc, buildOutputOption)
- if err != nil {
- return "", nil, errors.Wrapf(err, "failed to export build output")
- }
- if errChan != nil {
- err = <-errChan
- if err != nil {
- return "", nil, err
- }
+ if err := s.generateBuildOutput(buildah.CommitOptions{}, buildOutputOption); err != nil {
+ return "", nil, err
}
-
}
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil {
@@ -1758,13 +1753,46 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
if imageRef != nil {
if dref := imageRef.DockerReference(); dref != nil {
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
- return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q", imgID)
+ return "", nil, fmt.Errorf("error computing canonical reference for new image %q: %w", imgID, err)
}
}
}
return imgID, ref, nil
}
+func (s *StageExecutor) generateBuildOutput(commitOpts buildah.CommitOptions, buildOutputOpts define.BuildOutputOption) error {
+ extractRootfsOpts := buildah.ExtractRootfsOptions{}
+ if unshare.IsRootless() {
+ // In order to maintain as much parity as possible
+ // with buildkit's version of --output and to avoid
+ // unsafe invocation of exported executables, it was
+ // decided to strip the setuid and setgid bits and extended attributes.
+ // Since modes like setuid and setgid leave room for an executable
+ // to get invoked with different file-system permissions, it's safer
+ // to strip them off for unprivileged invocation.
+ // See: https://github.com/containers/buildah/pull/3823#discussion_r829376633
+ extractRootfsOpts.StripSetuidBit = true
+ extractRootfsOpts.StripSetgidBit = true
+ extractRootfsOpts.StripXattrs = true
+ }
+ rc, errChan, err := s.builder.ExtractRootfs(commitOpts, extractRootfsOpts)
+ if err != nil {
+ return fmt.Errorf("failed to extract rootfs from given container image: %w", err)
+ }
+ defer rc.Close()
+ err = internalUtil.ExportFromReader(rc, buildOutputOpts)
+ if err != nil {
+ return fmt.Errorf("failed to export build output: %w", err)
+ }
+ if errChan != nil {
+ err = <-errChan
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func (s *StageExecutor) EnsureContainerPath(path string) error {
return s.builder.EnsureContainerPathAs(path, "", nil)
}
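The newly extracted generateBuildOutput helper centralizes a producer/consumer pattern: ExtractRootfs hands back a streaming reader plus an error channel, and the channel must be drained only after the stream has been fully read. A standalone sketch of consuming such a pair (extractRootfs here is a stand-in, not the buildah API):

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"
    )

    // extractRootfs is a stand-in producer: it streams data and reports any
    // asynchronous failure on the returned channel once the stream ends.
    func extractRootfs() (io.ReadCloser, chan error, error) {
        errChan := make(chan error, 1)
        rc := io.NopCloser(strings.NewReader("rootfs bytes"))
        go func() { errChan <- nil }() // producer finished cleanly
        return rc, errChan, nil
    }

    func export() error {
        rc, errChan, err := extractRootfs()
        if err != nil {
            return fmt.Errorf("failed to extract rootfs: %w", err)
        }
        defer rc.Close()
        if _, err := io.Copy(os.Stdout, rc); err != nil {
            return fmt.Errorf("failed to export build output: %w", err)
        }
        // Drain the error channel last: the producer's error is only
        // meaningful once the consumer has read the whole stream.
        if errChan != nil {
            if err := <-errChan; err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        if err := export(); err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
    }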
diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go
index a4fcee476..70dccad94 100644
--- a/vendor/github.com/containers/buildah/import.go
+++ b/vendor/github.com/containers/buildah/import.go
@@ -2,6 +2,8 @@ package buildah
import (
"context"
+ "errors"
+ "fmt"
"github.com/containers/buildah/define"
"github.com/containers/buildah/docker"
@@ -13,12 +15,11 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
func importBuilderDataFromImage(ctx context.Context, store storage.Store, systemContext *types.SystemContext, imageID, containerName, containerID string) (*Builder, error) {
if imageID == "" {
- return nil, errors.Errorf("Internal error: imageID is empty in importBuilderDataFromImage")
+ return nil, errors.New("Internal error: imageID is empty in importBuilderDataFromImage")
}
storeopts, err := storage.DefaultStoreOptions(false, 0)
@@ -29,18 +30,18 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
ref, err := is.Transport.ParseStoreReference(store, imageID)
if err != nil {
- return nil, errors.Wrapf(err, "no such image %q", imageID)
+ return nil, fmt.Errorf("no such image %q: %w", imageID, err)
}
src, err := ref.NewImageSource(ctx, systemContext)
if err != nil {
- return nil, errors.Wrapf(err, "error instantiating image source")
+ return nil, fmt.Errorf("error instantiating image source: %w", err)
}
defer src.Close()
imageDigest := ""
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error loading image manifest for %q", transports.ImageName(ref))
+ return nil, fmt.Errorf("error loading image manifest for %q: %w", transports.ImageName(ref), err)
}
if manifestDigest, err := manifest.Digest(manifestBytes); err == nil {
imageDigest = manifestDigest.String()
@@ -50,18 +51,18 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
if manifest.MIMETypeIsMultiImage(manifestType) {
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing image manifest for %q as list", transports.ImageName(ref))
+ return nil, fmt.Errorf("error parsing image manifest for %q as list: %w", transports.ImageName(ref), err)
}
instance, err := list.ChooseInstance(systemContext)
if err != nil {
- return nil, errors.Wrapf(err, "error finding an appropriate image in manifest list %q", transports.ImageName(ref))
+ return nil, fmt.Errorf("error finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
}
instanceDigest = &instance
}
image, err := image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(src, instanceDigest))
if err != nil {
- return nil, errors.Wrapf(err, "error instantiating image for %q instance %q", transports.ImageName(ref), instanceDigest)
+ return nil, fmt.Errorf("error instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
}
imageName := ""
@@ -72,7 +73,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
if img.TopLayer != "" {
layer, err4 := store.Layer(img.TopLayer)
if err4 != nil {
- return nil, errors.Wrapf(err4, "error reading information about image's top layer")
+ return nil, fmt.Errorf("error reading information about image's top layer: %w", err4)
}
uidmap, gidmap = convertStorageIDMaps(layer.UIDMap, layer.GIDMap)
}
@@ -109,7 +110,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
}
if err := builder.initConfig(ctx, image, systemContext); err != nil {
- return nil, errors.Wrapf(err, "error preparing image configuration")
+ return nil, fmt.Errorf("error preparing image configuration: %w", err)
}
return builder, nil
@@ -117,7 +118,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
func importBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) {
if options.Container == "" {
- return nil, errors.Errorf("container name must be specified")
+ return nil, errors.New("container name must be specified")
}
c, err := store.Container(options.Container)
@@ -146,7 +147,7 @@ func importBuilder(ctx context.Context, store storage.Store, options ImportOptio
err = builder.Save()
if err != nil {
- return nil, errors.Wrapf(err, "error saving builder state")
+ return nil, fmt.Errorf("error saving builder state: %w", err)
}
return builder, nil
@@ -154,19 +155,19 @@ func importBuilder(ctx context.Context, store storage.Store, options ImportOptio
func importBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) {
if options.Image == "" {
- return nil, errors.Errorf("image name must be specified")
+ return nil, errors.New("image name must be specified")
}
systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath)
_, img, err := util.FindImage(store, "", systemContext, options.Image)
if err != nil {
- return nil, errors.Wrapf(err, "importing settings")
+ return nil, fmt.Errorf("importing settings: %w", err)
}
builder, err := importBuilderDataFromImage(ctx, store, systemContext, img.ID, "", "")
if err != nil {
- return nil, errors.Wrapf(err, "error importing build settings from image %q", options.Image)
+ return nil, fmt.Errorf("error importing build settings from image %q: %w", options.Image, err)
}
builder.setupLogger()
diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go
index 12b69c9ba..04a1fd08f 100644
--- a/vendor/github.com/containers/buildah/info.go
+++ b/vendor/github.com/containers/buildah/info.go
@@ -3,6 +3,7 @@ package buildah
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io/ioutil"
"os"
@@ -17,7 +18,6 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -189,7 +189,7 @@ func readUptime() (string, error) {
}
f := bytes.Fields(buf)
if len(f) < 1 {
- return "", errors.Errorf("invalid uptime")
+ return "", errors.New("invalid uptime")
}
return string(f[0]), nil
}
diff --git a/vendor/github.com/containers/buildah/internal/parse/parse.go b/vendor/github.com/containers/buildah/internal/parse/parse.go
index 1c736cdf1..3da5f6d73 100644
--- a/vendor/github.com/containers/buildah/internal/parse/parse.go
+++ b/vendor/github.com/containers/buildah/internal/parse/parse.go
@@ -8,6 +8,8 @@ import (
"strconv"
"strings"
+ "errors"
+
"github.com/containers/buildah/internal"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/common/pkg/parse"
@@ -16,7 +18,6 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/lockfile"
specs "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
const (
@@ -76,22 +77,22 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
newMount.Options = append(newMount.Options, kv[0])
case "from":
if len(kv) == 1 {
- return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
fromImage = kv[1]
case "bind-propagation":
if len(kv) == 1 {
- return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Options = append(newMount.Options, kv[1])
case "src", "source":
if len(kv) == 1 {
- return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Source = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
- return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, "", err
@@ -103,7 +104,7 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
// and can thus be safely ignored.
// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
default:
- return newMount, "", errors.Wrapf(errBadMntOption, kv[0])
+ return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption)
}
}
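Note the argument order in these conversions: errors.Wrapf(errBadOptionArg, kv[0]) put the sentinel first, while fmt.Errorf("%v: %w", kv[0], errBadOptionArg) puts the offending key in the message and wraps the sentinel, so callers can still match it. A standalone sketch:

    package main

    import (
        "errors"
        "fmt"
    )

    // errBadOptionArg mirrors the package-level sentinel used above.
    var errBadOptionArg = errors.New("option requires an argument")

    func parseOption(kv []string) error {
        if len(kv) == 1 {
            // The key goes in the message; the sentinel stays matchable via %w.
            return fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
        }
        return nil
    }

    func main() {
        err := parseOption([]string{"bind-propagation"})
        fmt.Println(err)                             // bind-propagation: option requires an argument
        fmt.Println(errors.Is(err, errBadOptionArg)) // true
    }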
@@ -223,22 +224,22 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
sharing = kv[1]
case "bind-propagation":
if len(kv) == 1 {
- return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Options = append(newMount.Options, kv[1])
case "id":
if len(kv) == 1 {
- return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
id = kv[1]
case "from":
if len(kv) == 1 {
- return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
fromStage = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
- return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, lockedTargets, err
@@ -247,35 +248,35 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
setDest = true
case "src", "source":
if len(kv) == 1 {
- return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Source = kv[1]
case "mode":
if len(kv) == 1 {
- return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
mode, err = strconv.ParseUint(kv[1], 8, 32)
if err != nil {
- return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache mode")
+ return newMount, lockedTargets, fmt.Errorf("unable to parse cache mode: %w", err)
}
case "uid":
if len(kv) == 1 {
- return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
uid, err = strconv.Atoi(kv[1])
if err != nil {
- return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache uid")
+ return newMount, lockedTargets, fmt.Errorf("unable to parse cache uid: %w", err)
}
case "gid":
if len(kv) == 1 {
- return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
gid, err = strconv.Atoi(kv[1])
if err != nil {
- return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache gid")
+ return newMount, lockedTargets, fmt.Errorf("unable to parse cache gid: %w", err)
}
default:
- return newMount, lockedTargets, errors.Wrapf(errBadMntOption, kv[0])
+ return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
}
}
@@ -313,7 +314,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
// create cache on host if not present
err = os.MkdirAll(cacheParent, os.FileMode(0755))
if err != nil {
- return newMount, lockedTargets, errors.Wrapf(err, "Unable to create build cache directory")
+ return newMount, lockedTargets, fmt.Errorf("unable to create build cache directory: %w", err)
}
if id != "" {
@@ -328,7 +329,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
// buildkit parity: change uid and gid if specified, otherwise keep `0`
err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
if err != nil {
- return newMount, lockedTargets, errors.Wrapf(err, "Unable to change uid,gid of cache directory")
+ return newMount, lockedTargets, fmt.Errorf("unable to change uid,gid of cache directory: %w", err)
}
}
@@ -337,7 +338,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
// lock parent cache
lockfile, err := lockfile.GetLockfile(filepath.Join(newMount.Source, BuildahCacheLockfile))
if err != nil {
- return newMount, lockedTargets, errors.Wrapf(err, "Unable to acquire lock when sharing mode is locked")
+ return newMount, lockedTargets, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
}
// Will be unlocked after the RUN step is executed.
lockfile.Lock()
@@ -347,7 +348,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
break
default:
// error out for unknown values
- return newMount, lockedTargets, errors.Wrapf(err, "Unrecognized value %q for field `sharing`", sharing)
+ return newMount, lockedTargets, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err)
}
// buildkit parity: default sharing should be shared
@@ -375,10 +376,10 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
// ValidateVolumeMountHostDir validates the host path of buildah --volume
func ValidateVolumeMountHostDir(hostDir string) error {
if !filepath.IsAbs(hostDir) {
- return errors.Errorf("invalid host path, must be an absolute path %q", hostDir)
+ return fmt.Errorf("invalid host path, must be an absolute path %q", hostDir)
}
if _, err := os.Stat(hostDir); err != nil {
- return errors.WithStack(err)
+ return err
}
return nil
}
@@ -421,7 +422,7 @@ func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) {
return nil, err
}
if _, ok := finalVolumeMounts[volumeMount.Destination]; ok {
- return nil, errors.Wrapf(errDuplicateDest, volumeMount.Destination)
+ return nil, fmt.Errorf("%v: %w", volumeMount.Destination, errDuplicateDest)
}
finalVolumeMounts[volumeMount.Destination] = volumeMount
}
@@ -433,7 +434,7 @@ func Volume(volume string) (specs.Mount, error) {
mount := specs.Mount{}
arr := SplitStringWithColonEscape(volume)
if len(arr) < 2 {
- return mount, errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume)
+ return mount, fmt.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume)
}
if err := ValidateVolumeMountHostDir(arr[0]); err != nil {
return mount, err
@@ -468,7 +469,7 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string,
}
for dest, mount := range volumeMounts {
if _, ok := unifiedMounts[dest]; ok {
- return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, dest)
+ return nil, mountedImages, lockedTargets, fmt.Errorf("%v: %w", dest, errDuplicateDest)
}
unifiedMounts[dest] = mount
}
@@ -489,7 +490,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
mountedImages := make([]string, 0)
lockedTargets := make([]string, 0)
- errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")
+ errInvalidSyntax := errors.New("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")
// TODO(vrothberg): the manual parsing can be replaced with a regular expression
// to allow a more robust parsing of the mount format and to give
@@ -497,13 +498,13 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
for _, mount := range mounts {
arr := strings.SplitN(mount, ",", 2)
if len(arr) < 2 {
- return nil, mountedImages, lockedTargets, errors.Wrapf(errInvalidSyntax, "%q", mount)
+ return nil, mountedImages, lockedTargets, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
}
kv := strings.Split(arr[0], "=")
// TODO: type is not explicitly required in Docker.
// If not specified, it defaults to "volume".
if len(kv) != 2 || kv[0] != "type" {
- return nil, mountedImages, lockedTargets, errors.Wrapf(errInvalidSyntax, "%q", mount)
+ return nil, mountedImages, lockedTargets, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
}
tokens := strings.Split(arr[1], ",")
@@ -514,7 +515,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
return nil, mountedImages, lockedTargets, err
}
if _, ok := finalMounts[mount.Destination]; ok {
- return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination)
+ return nil, mountedImages, lockedTargets, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
mountedImages = append(mountedImages, image)
@@ -525,7 +526,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
return nil, mountedImages, lockedTargets, err
}
if _, ok := finalMounts[mount.Destination]; ok {
- return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination)
+ return nil, mountedImages, lockedTargets, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
case TypeTmpfs:
@@ -534,11 +535,11 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
return nil, mountedImages, lockedTargets, err
}
if _, ok := finalMounts[mount.Destination]; ok {
- return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination)
+ return nil, mountedImages, lockedTargets, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
default:
- return nil, mountedImages, lockedTargets, errors.Errorf("invalid filesystem type %q", kv[1])
+ return nil, mountedImages, lockedTargets, fmt.Errorf("invalid filesystem type %q", kv[1])
}
}
@@ -567,19 +568,19 @@ func GetTmpfsMount(args []string) (specs.Mount, error) {
newMount.Options = append(newMount.Options, kv[0])
case "tmpfs-mode":
if len(kv) == 1 {
- return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
case "tmpfs-size":
if len(kv) == 1 {
- return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
case "src", "source":
- return newMount, errors.Errorf("source is not supported with tmpfs mounts")
+ return newMount, errors.New("source is not supported with tmpfs mounts")
case "target", "dst", "destination":
if len(kv) == 1 {
- return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+ return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, err
@@ -587,7 +588,7 @@ func GetTmpfsMount(args []string) (specs.Mount, error) {
newMount.Destination = kv[1]
setDest = true
default:
- return newMount, errors.Wrapf(errBadMntOption, kv[0])
+ return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
}
}
diff --git a/vendor/github.com/containers/buildah/internal/util/util.go b/vendor/github.com/containers/buildah/internal/util/util.go
index abaadc616..7d824ccf2 100644
--- a/vendor/github.com/containers/buildah/internal/util/util.go
+++ b/vendor/github.com/containers/buildah/internal/util/util.go
@@ -1,6 +1,7 @@
package util
import (
+ "fmt"
"io"
"os"
"path/filepath"
@@ -14,7 +15,6 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/unshare"
- "github.com/pkg/errors"
)
// LookupImage returns *Image to corresponding imagename or id
@@ -66,25 +66,25 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
err = os.MkdirAll(opts.Path, 0700)
if err != nil {
- return errors.Wrapf(err, "failed while creating the destination path %q", opts.Path)
+ return fmt.Errorf("failed while creating the destination path %q: %w", opts.Path, err)
}
err = chrootarchive.Untar(input, opts.Path, &archive.TarOptions{NoLchown: noLChown})
if err != nil {
- return errors.Wrapf(err, "failed while performing untar at %q", opts.Path)
+ return fmt.Errorf("failed while performing untar at %q: %w", opts.Path, err)
}
} else {
outFile := os.Stdout
if !opts.IsStdout {
outFile, err = os.Create(opts.Path)
if err != nil {
- return errors.Wrapf(err, "failed while creating destination tar at %q", opts.Path)
+ return fmt.Errorf("failed while creating destination tar at %q: %w", opts.Path, err)
}
defer outFile.Close()
}
_, err = io.Copy(outFile, input)
if err != nil {
- return errors.Wrapf(err, "failed while performing copy to %q", opts.Path)
+ return fmt.Errorf("failed while performing copy to %q: %w", opts.Path, err)
}
}
return nil
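So ExportFromReader has two modes: untar the stream into a directory, or copy the raw tar to a file (or stdout). A condensed standalone sketch of that branching, assuming define.BuildOutputOption exposes Path/IsDir/IsStdout fields as the code above suggests:

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"
    )

    // outputOption loosely mirrors define.BuildOutputOption; treat the
    // field names as assumptions for illustration.
    type outputOption struct {
        Path     string
        IsDir    bool
        IsStdout bool
    }

    // exportFromReader sketches the two branches: a directory destination
    // would be untarred in place, anything else receives the raw tar stream.
    func exportFromReader(input io.Reader, opts outputOption) error {
        if opts.IsDir {
            if err := os.MkdirAll(opts.Path, 0o700); err != nil {
                return fmt.Errorf("failed while creating the destination path %q: %w", opts.Path, err)
            }
            // The real implementation untars here via chrootarchive.Untar;
            // this sketch just drains the stream.
            _, err := io.Copy(io.Discard, input)
            return err
        }
        out := os.Stdout
        if !opts.IsStdout {
            f, err := os.Create(opts.Path)
            if err != nil {
                return fmt.Errorf("failed while creating destination tar at %q: %w", opts.Path, err)
            }
            defer f.Close()
            out = f
        }
        if _, err := io.Copy(out, input); err != nil {
            return fmt.Errorf("failed while performing copy to %q: %w", opts.Path, err)
        }
        return nil
    }

    func main() {
        if err := exportFromReader(strings.NewReader("tar bytes"), outputOption{IsStdout: true}); err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
    }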
@@ -97,7 +97,7 @@ func DecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error) {
// decryption
dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
if err != nil {
- return nil, errors.Wrapf(err, "invalid decryption keys")
+ return nil, fmt.Errorf("invalid decryption keys: %w", err)
}
cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
decryptConfig = cc.DecryptConfig
@@ -116,7 +116,7 @@ func EncryptConfig(encryptionKeys []string, encryptLayers []int) (*encconfig.Enc
encLayers = &encryptLayers
ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{})
if err != nil {
- return nil, nil, errors.Wrapf(err, "invalid encryption keys")
+ return nil, nil, fmt.Errorf("invalid encryption keys: %w", err)
}
cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc})
encConfig = cc.EncryptConfig
@@ -132,6 +132,6 @@ func GetFormat(format string) (string, error) {
case define.DOCKER:
return define.Dockerv2ImageManifest, nil
default:
- return "", errors.Errorf("unrecognized image type %q", format)
+ return "", fmt.Errorf("unrecognized image type %q", format)
}
}
diff --git a/vendor/github.com/containers/buildah/mount.go b/vendor/github.com/containers/buildah/mount.go
index 8c7a23f8c..3b1ff5820 100644
--- a/vendor/github.com/containers/buildah/mount.go
+++ b/vendor/github.com/containers/buildah/mount.go
@@ -1,21 +1,19 @@
package buildah
-import (
- "github.com/pkg/errors"
-)
+import "fmt"
// Mount mounts a container's root filesystem in a location which can be
// accessed from the host, and returns the location.
func (b *Builder) Mount(label string) (string, error) {
mountpoint, err := b.store.Mount(b.ContainerID, label)
if err != nil {
- return "", errors.Wrapf(err, "error mounting build container %q", b.ContainerID)
+ return "", fmt.Errorf("error mounting build container %q: %w", b.ContainerID, err)
}
b.MountPoint = mountpoint
err = b.Save()
if err != nil {
- return "", errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID)
+ return "", fmt.Errorf("error saving updated state for build container %q: %w", b.ContainerID, err)
}
return mountpoint, nil
}
@@ -23,7 +21,7 @@ func (b *Builder) Mount(label string) (string, error) {
func (b *Builder) setMountPoint(mountPoint string) error {
b.MountPoint = mountPoint
if err := b.Save(); err != nil {
- return errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID)
+ return fmt.Errorf("error saving updated state for build container %q: %w", b.ContainerID, err)
}
return nil
}
@@ -32,17 +30,17 @@ func (b *Builder) setMountPoint(mountPoint string) error {
func (b *Builder) Mounted() (bool, error) {
mountCnt, err := b.store.Mounted(b.ContainerID)
if err != nil {
- return false, errors.Wrapf(err, "error determining if mounting build container %q is mounted", b.ContainerID)
+ return false, fmt.Errorf("error determining if mounting build container %q is mounted: %w", b.ContainerID, err)
}
mounted := mountCnt > 0
if mounted && b.MountPoint == "" {
ctr, err := b.store.Container(b.ContainerID)
if err != nil {
- return mountCnt > 0, errors.Wrapf(err, "error determining if mounting build container %q is mounted", b.ContainerID)
+ return mountCnt > 0, fmt.Errorf("error determining if mounting build container %q is mounted: %w", b.ContainerID, err)
}
layer, err := b.store.Layer(ctr.LayerID)
if err != nil {
- return mountCnt > 0, errors.Wrapf(err, "error determining if mounting build container %q is mounted", b.ContainerID)
+ return mountCnt > 0, fmt.Errorf("error determining if mounting build container %q is mounted: %w", b.ContainerID, err)
}
return mounted, b.setMountPoint(layer.MountPoint)
}
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 03b2de662..0ebda161b 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -2,6 +2,7 @@ package buildah
import (
"context"
+ "errors"
"fmt"
"math/rand"
"strings"
@@ -19,7 +20,6 @@ import (
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/openshift/imagebuilder"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -76,15 +76,20 @@ func imageNamePrefix(imageName string) string {
func newContainerIDMappingOptions(idmapOptions *define.IDMappingOptions) storage.IDMappingOptions {
var options storage.IDMappingOptions
if idmapOptions != nil {
- options.HostUIDMapping = idmapOptions.HostUIDMapping
- options.HostGIDMapping = idmapOptions.HostGIDMapping
- uidmap, gidmap := convertRuntimeIDMaps(idmapOptions.UIDMap, idmapOptions.GIDMap)
- if len(uidmap) > 0 && len(gidmap) > 0 {
- options.UIDMap = uidmap
- options.GIDMap = gidmap
+ if idmapOptions.AutoUserNs {
+ options.AutoUserNs = true
+ options.AutoUserNsOpts = idmapOptions.AutoUserNsOpts
} else {
- options.HostUIDMapping = true
- options.HostGIDMapping = true
+ options.HostUIDMapping = idmapOptions.HostUIDMapping
+ options.HostGIDMapping = idmapOptions.HostGIDMapping
+ uidmap, gidmap := convertRuntimeIDMaps(idmapOptions.UIDMap, idmapOptions.GIDMap)
+ if len(uidmap) > 0 && len(gidmap) > 0 {
+ options.UIDMap = uidmap
+ options.GIDMap = gidmap
+ } else {
+ options.HostUIDMapping = true
+ options.HostGIDMapping = true
+ }
}
}
return options
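The new outer branch means an auto-userns request short-circuits the explicit mapping logic entirely; explicit UID/GID maps (with host mapping as the fallback) apply only when AutoUserNs is off. A condensed standalone sketch of the decision, using simplified stand-ins for the define/storage option structs:

    package main

    import "fmt"

    // idMappingOptions and storageOptions are simplified stand-ins for
    // define.IDMappingOptions and storage.IDMappingOptions.
    type idMappingOptions struct {
        AutoUserNs                     bool
        HostUIDMapping, HostGIDMapping bool
        UIDMap, GIDMap                 []int
    }

    type storageOptions struct {
        AutoUserNs                     bool
        HostUIDMapping, HostGIDMapping bool
        UIDMap, GIDMap                 []int
    }

    func newContainerIDMappingOptions(in *idMappingOptions) storageOptions {
        var out storageOptions
        if in == nil {
            return out
        }
        if in.AutoUserNs {
            // Auto mode: the storage layer allocates the namespace;
            // explicit maps are ignored.
            out.AutoUserNs = true
            return out
        }
        out.HostUIDMapping, out.HostGIDMapping = in.HostUIDMapping, in.HostGIDMapping
        if len(in.UIDMap) > 0 && len(in.GIDMap) > 0 {
            out.UIDMap, out.GIDMap = in.UIDMap, in.GIDMap
        } else {
            out.HostUIDMapping, out.HostGIDMapping = true, true
        }
        return out
    }

    func main() {
        fmt.Printf("%+v\n", newContainerIDMappingOptions(&idMappingOptions{AutoUserNs: true}))
    }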
@@ -185,12 +190,12 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
if ref != nil {
srcSrc, err := ref.NewImageSource(ctx, systemContext)
if err != nil {
- return nil, errors.Wrapf(err, "error instantiating image for %q", transports.ImageName(ref))
+ return nil, fmt.Errorf("error instantiating image for %q: %w", transports.ImageName(ref), err)
}
defer srcSrc.Close()
manifestBytes, manifestType, err := srcSrc.GetManifest(ctx, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error loading image manifest for %q", transports.ImageName(ref))
+ return nil, fmt.Errorf("error loading image manifest for %q: %w", transports.ImageName(ref), err)
}
if manifestDigest, err := manifest.Digest(manifestBytes); err == nil {
imageDigest = manifestDigest.String()
@@ -199,17 +204,17 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
if manifest.MIMETypeIsMultiImage(manifestType) {
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing image manifest for %q as list", transports.ImageName(ref))
+ return nil, fmt.Errorf("error parsing image manifest for %q as list: %w", transports.ImageName(ref), err)
}
instance, err := list.ChooseInstance(systemContext)
if err != nil {
- return nil, errors.Wrapf(err, "error finding an appropriate image in manifest list %q", transports.ImageName(ref))
+ return nil, fmt.Errorf("error finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
}
instanceDigest = &instance
}
src, err = image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(srcSrc, instanceDigest))
if err != nil {
- return nil, errors.Wrapf(err, "error instantiating image for %q instance %q", transports.ImageName(ref), instanceDigest)
+ return nil, fmt.Errorf("error instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
}
}
@@ -229,7 +234,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
if options.Container == "" {
containers, err := store.Containers()
if err != nil {
- return nil, errors.Wrapf(err, "unable to check for container names")
+ return nil, fmt.Errorf("unable to check for container names: %w", err)
}
tmpName = findUnusedContainer(tmpName, containers)
}
@@ -257,8 +262,8 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
name = tmpName
break
}
- if errors.Cause(err) != storage.ErrDuplicateName || options.Container != "" {
- return nil, errors.Wrapf(err, "error creating container")
+ if !errors.Is(err, storage.ErrDuplicateName) || options.Container != "" {
+ return nil, fmt.Errorf("error creating container: %w", err)
}
tmpName = fmt.Sprintf("%s-%d", name, rand.Int()%conflict)
conflict = conflict * 10
@@ -328,16 +333,16 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
if options.Mount {
_, err = builder.Mount(container.MountLabel())
if err != nil {
- return nil, errors.Wrapf(err, "error mounting build container %q", builder.ContainerID)
+ return nil, fmt.Errorf("error mounting build container %q: %w", builder.ContainerID, err)
}
}
if err := builder.initConfig(ctx, src, systemContext); err != nil {
- return nil, errors.Wrapf(err, "error preparing image configuration")
+ return nil, fmt.Errorf("error preparing image configuration: %w", err)
}
err = builder.Save()
if err != nil {
- return nil, errors.Wrapf(err, "error saving builder state for container %q", builder.ContainerID)
+ return nil, fmt.Errorf("error saving builder state for container %q: %w", builder.ContainerID, err)
}
return builder, nil
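
The conversions above replace github.com/pkg/errors with the standard library; errors wrapped via fmt.Errorf's %w verb stay matchable with errors.Is, which is what the retry loop's duplicate-name check depends on. A minimal standalone sketch (ErrDuplicateName is a stand-in for storage.ErrDuplicateName):

	package main

	import (
		"errors"
		"fmt"
	)

	// Stand-in for storage.ErrDuplicateName.
	var ErrDuplicateName = errors.New("that name is already in use")

	func create() error {
		return fmt.Errorf("error creating container: %w", ErrDuplicateName)
	}

	func main() {
		// %w keeps the sentinel reachable through the wrap chain.
		fmt.Println(errors.Is(create(), ErrDuplicateName)) // true
	}
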
diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go
index 0b5c04398..9fffc6d70 100644
--- a/vendor/github.com/containers/buildah/pkg/chrootuser/user.go
+++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go
@@ -1,11 +1,11 @@
package chrootuser
import (
+ "errors"
+ "fmt"
"os/user"
"strconv"
"strings"
-
- "github.com/pkg/errors"
)
var (
@@ -76,9 +76,9 @@ func GetUser(rootdir, userspec string) (uint32, uint32, string, error) {
return uint32(uid64), uint32(gid64), homedir, nil
}
- err = errors.Wrapf(uerr, "error determining run uid")
+ err = fmt.Errorf("error determining run uid: %w", uerr)
if uerr == nil {
- err = errors.Wrapf(gerr, "error determining run gid")
+ err = fmt.Errorf("error determining run gid: %w", gerr)
}
return 0, 0, homedir, err
@@ -94,7 +94,7 @@ func GetGroup(rootdir, groupspec string) (uint32, error) {
gid64, gerr = lookupGroupInContainer(rootdir, groupspec)
}
if gerr != nil {
- return 0, errors.Wrapf(gerr, "error looking up group for gid %q", groupspec)
+ return 0, fmt.Errorf("error looking up group for gid %q: %w", groupspec, gerr)
}
return uint32(gid64), nil
}
@@ -103,7 +103,7 @@ func GetGroup(rootdir, groupspec string) (uint32, error) {
func GetAdditionalGroupsForUser(rootdir string, userid uint64) ([]uint32, error) {
gids, err := lookupAdditionalGroupsForUIDInContainer(rootdir, userid)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up supplemental groups for uid %d", userid)
+ return nil, fmt.Errorf("error looking up supplemental groups for uid %d: %w", userid, err)
}
return gids, nil
}
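
One behavioral wrinkle in this conversion: errors.Wrapf(nil, ...) returned nil, while fmt.Errorf always returns a non-nil error, so the explicit uerr == nil check above remains load-bearing. A standalone sketch of the difference:

	package main

	import (
		"errors"
		"fmt"
	)

	func main() {
		var cause error // nil, standing in for a successful lookup
		wrapped := fmt.Errorf("error determining run uid: %w", cause)
		fmt.Println(wrapped == nil)         // false: fmt.Errorf never returns nil
		fmt.Println(errors.Unwrap(wrapped)) // <nil>: nothing was actually wrapped
	}
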
diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go
index 6c997c4c9..5655a54db 100644
--- a/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go
+++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go
@@ -1,9 +1,10 @@
-// +build !linux
+//go:build !linux && !freebsd
+// +build !linux,!freebsd
package chrootuser
import (
- "github.com/pkg/errors"
+ "errors"
)
func lookupUserInContainer(rootdir, username string) (uint64, uint64, error) {
diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go
index 89b31782e..b58f5a428 100644
--- a/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go
+++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go
@@ -1,4 +1,5 @@
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package chrootuser
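
The rename above also modernizes the build constraints. Since Go 1.17 the //go:build form is canonical and gofmt keeps the legacy line in sync; a sketch of how the two syntaxes relate:

	//go:build linux || freebsd
	// +build linux freebsd

	// In the legacy syntax a space means OR and a comma means AND, so the
	// "!linux,!freebsd" constraint in user_basic.go reads as
	// "!linux && !freebsd".
	package chrootuser
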
diff --git a/vendor/github.com/containers/buildah/pkg/cli/build.go b/vendor/github.com/containers/buildah/pkg/cli/build.go
index 396a9e74e..98c42453b 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/build.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/build.go
@@ -5,6 +5,7 @@ package cli
// that vendor in this code can use them too.
import (
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -18,7 +19,6 @@ import (
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/pkg/util"
"github.com/containers/common/pkg/auth"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -66,7 +66,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
if c.Flag("logsplit").Changed {
if !c.Flag("logfile").Changed {
- return options, nil, nil, errors.Errorf("cannot use --logsplit without --logfile")
+ return options, nil, nil, errors.New("cannot use --logsplit without --logfile")
}
}
@@ -114,7 +114,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
if len(av) > 1 {
parseAdditionalBuildContext, err := parse.GetAdditionalBuildContext(av[1])
if err != nil {
- return options, nil, nil, errors.Wrapf(err, "while parsing additional build context")
+ return options, nil, nil, fmt.Errorf("while parsing additional build context: %w", err)
}
additionalBuildContext[av[0]] = &parseAdditionalBuildContext
} else {
@@ -140,13 +140,13 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
if len(cliArgs) == 0 {
contextDir, err = os.Getwd()
if err != nil {
- return options, nil, nil, errors.Wrapf(err, "unable to choose current working directory as build context")
+ return options, nil, nil, fmt.Errorf("unable to choose current working directory as build context: %w", err)
}
} else {
// The context directory could be a URL. Try to handle that.
tempDir, subDir, err := define.TempDirForURL("", "buildah", cliArgs[0])
if err != nil {
- return options, nil, nil, errors.Wrapf(err, "error prepping temporary context directory")
+ return options, nil, nil, fmt.Errorf("error prepping temporary context directory: %w", err)
}
if tempDir != "" {
// We had to download it to a temporary directory.
@@ -157,7 +157,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
// Nope, it was local. Use it as is.
absDir, err := filepath.Abs(cliArgs[0])
if err != nil {
- return options, nil, nil, errors.Wrapf(err, "error determining path to directory")
+ return options, nil, nil, fmt.Errorf("error determining path to directory: %w", err)
}
contextDir = absDir
}
@@ -175,7 +175,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
contextDir, err = filepath.EvalSymlinks(contextDir)
if err != nil {
- return options, nil, nil, errors.Wrapf(err, "error evaluating symlinks in build context path")
+ return options, nil, nil, fmt.Errorf("error evaluating symlinks in build context path: %w", err)
}
var stdin io.Reader
@@ -196,7 +196,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
systemContext, err := parse.SystemContextFromOptions(c)
if err != nil {
- return options, nil, nil, errors.Wrapf(err, "error building system context")
+ return options, nil, nil, fmt.Errorf("error building system context: %w", err)
}
isolation, err := parse.IsolationOption(iopts.Isolation)
@@ -226,11 +226,11 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
}
if pullFlagsCount > 1 {
- return options, nil, nil, errors.Errorf("can only set one of 'pull' or 'pull-always' or 'pull-never'")
+ return options, nil, nil, errors.New("can only set one of 'pull' or 'pull-always' or 'pull-never'")
}
if (c.Flag("rm").Changed || c.Flag("force-rm").Changed) && (!c.Flag("layers").Changed && !c.Flag("no-cache").Changed) {
- return options, nil, nil, errors.Errorf("'rm' and 'force-rm' can only be set with either 'layers' or 'no-cache'")
+ return options, nil, nil, errors.New("'rm' and 'force-rm' can only be set with either 'layers' or 'no-cache'")
}
if c.Flag("cache-from").Changed {
@@ -256,7 +256,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
}
usernsOption, idmappingOptions, err := parse.IDMappingOptions(c, isolation)
if err != nil {
- return options, nil, nil, errors.Wrapf(err, "error parsing ID mapping options")
+ return options, nil, nil, fmt.Errorf("error parsing ID mapping options: %w", err)
}
namespaceOptions.AddOrReplace(usernsOption...)
@@ -267,7 +267,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
decryptConfig, err := iutil.DecryptConfig(iopts.DecryptionKeys)
if err != nil {
- return options, nil, nil, errors.Wrapf(err, "unable to obtain decrypt config")
+ return options, nil, nil, fmt.Errorf("unable to obtain decrypt config: %w", err)
}
var excludes []string
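
Several checks above (e.g. --logsplit requiring --logfile) hinge on cobra's Changed field, which reports whether a flag was set explicitly rather than left at its default. A standalone sketch using the cobra API this file already imports:

	package main

	import (
		"fmt"

		"github.com/spf13/cobra"
	)

	func main() {
		cmd := &cobra.Command{Use: "build"}
		cmd.Flags().String("logfile", "", "log to file")
		cmd.Flags().Bool("logsplit", false, "split the log file per platform")
		_ = cmd.Flags().Parse([]string{"--logsplit"})
		if cmd.Flag("logsplit").Changed && !cmd.Flag("logfile").Changed {
			fmt.Println("cannot use --logsplit without --logfile")
		}
	}
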
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 695aba7fb..97ab95ee1 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -17,7 +17,6 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/spf13/pflag"
)
@@ -74,6 +73,7 @@ type BudResults struct {
NoCache bool
Timestamp int64
OmitHistory bool
+ OCIHooksDir []string
Pull string
PullAlways bool
PullNever bool
@@ -194,6 +194,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host")
fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "set metadata for an image (default [])")
fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file.")
+ fs.StringArrayVar(&flags.OCIHooksDir, "hooks-dir", []string{}, "set the OCI hooks directory path (may be set multiple times)")
fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
fs.StringArrayVar(&flags.BuildContext, "build-context", []string{}, "`argument=value` to supply additional build context to the builder")
fs.StringVar(&flags.CacheFrom, "cache-from", "", "images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.")
@@ -282,6 +283,7 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion["file"] = commonComp.AutocompleteDefault
flagCompletion["format"] = commonComp.AutocompleteNone
flagCompletion["from"] = commonComp.AutocompleteDefault
+ flagCompletion["hooks-dir"] = commonComp.AutocompleteNone
flagCompletion["ignorefile"] = commonComp.AutocompleteDefault
flagCompletion["iidfile"] = commonComp.AutocompleteDefault
flagCompletion["jobs"] = commonComp.AutocompleteNone
@@ -311,7 +313,7 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
fs := pflag.FlagSet{}
defaultContainerConfig, err := config.Default()
if err != nil {
- return fs, errors.Wrapf(err, "failed to get container config")
+ return fs, fmt.Errorf("failed to get container config: %w", err)
}
fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])")
@@ -440,7 +442,7 @@ func DefaultHistory() bool {
func VerifyFlagsArgsOrder(args []string) error {
for _, arg := range args {
if strings.HasPrefix(arg, "-") {
- return errors.Errorf("No options (%s) can be specified after the image or container name", arg)
+ return fmt.Errorf("no options (%s) can be specified after the image or container name", arg)
}
}
return nil
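
The new --hooks-dir flag is declared with StringArrayVar rather than StringSliceVar, so repeated occurrences accumulate and values are never split on commas. A standalone pflag sketch (paths are hypothetical):

	package main

	import (
		"fmt"

		"github.com/spf13/pflag"
	)

	func main() {
		var hooksDir []string
		fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
		fs.StringArrayVar(&hooksDir, "hooks-dir", []string{}, "OCI hooks directory (may be repeated)")
		_ = fs.Parse([]string{
			"--hooks-dir", "/usr/share/containers/oci/hooks.d",
			"--hooks-dir", "/etc/containers/oci/hooks.d",
		})
		fmt.Println(hooksDir) // both directories, in order, unsplit
	}
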
diff --git a/vendor/github.com/containers/buildah/pkg/jail/jail.go b/vendor/github.com/containers/buildah/pkg/jail/jail.go
new file mode 100644
index 000000000..fdaca5af2
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/jail/jail.go
@@ -0,0 +1,180 @@
+//go:build freebsd
+// +build freebsd
+
+package jail
+
+import (
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+type NS int32
+
+const (
+ DISABLED NS = 0
+ NEW NS = 1
+ INHERIT NS = 2
+
+ JAIL_CREATE = 0x01
+ JAIL_UPDATE = 0x02
+ JAIL_ATTACH = 0x04
+)
+
+type config struct {
+ params map[string]interface{}
+}
+
+func NewConfig() *config {
+ return &config{
+ params: make(map[string]interface{}),
+ }
+}
+
+func handleBoolSetting(key string, val bool) (string, interface{}) {
+ // jail doesn't deal with booleans - it uses paired parameter
+ // names, e.g. "persist"/"nopersist". If the key contains '.',
+ // the "no" prefix is applied to the last element.
+ if val == false {
+ parts := strings.Split(key, ".")
+ parts[len(parts)-1] = "no" + parts[len(parts)-1]
+ key = strings.Join(parts, ".")
+ }
+ return key, nil
+}
+
+func (c *config) Set(key string, value interface{}) {
+ // Normalise integer types to int32
+ switch v := value.(type) {
+ case int:
+ value = int32(v)
+ case uint32:
+ value = int32(v)
+ }
+
+ switch key {
+ case "jid", "devfs_ruleset", "enforce_statfs", "children.max", "securelevel":
+ if _, ok := value.(int32); !ok {
+ logrus.Fatalf("value for parameter %s must be an int32", key)
+ }
+ case "ip4", "ip6", "host", "vnet":
+ nsval, ok := value.(NS)
+ if !ok {
+ logrus.Fatalf("value for parameter %s must be a jail.NS", key)
+ }
+ if (key == "host" || key == "vnet") && nsval == DISABLED {
+ logrus.Fatalf("value for parameter %s cannot be DISABLED", key)
+ }
+ case "persist", "sysvmsg", "sysvsem", "sysvshm":
+ bval, ok := value.(bool)
+ if !ok {
+ logrus.Fatalf("value for parameter %s must be bool", key)
+ }
+ key, value = handleBoolSetting(key, bval)
+ default:
+ if strings.HasPrefix(key, "allow.") {
+ bval, ok := value.(bool)
+ if !ok {
+ logrus.Fatalf("value for parameter %s must be bool", key)
+ }
+ key, value = handleBoolSetting(key, bval)
+ } else {
+ if _, ok := value.(string); !ok {
+ logrus.Fatalf("value for parameter %s must be a string", key)
+ }
+ }
+ }
+ c.params[key] = value
+}
+
+func (c *config) getIovec() ([]syscall.Iovec, error) {
+ jiov := make([]syscall.Iovec, 0)
+ for key, value := range c.params {
+ iov, err := stringToIovec(key)
+ if err != nil {
+ return nil, err
+ }
+ jiov = append(jiov, iov)
+ switch v := value.(type) {
+ case string:
+ iov, err := stringToIovec(v)
+ if err != nil {
+ return nil, err
+ }
+ jiov = append(jiov, iov)
+ case int32:
+ jiov = append(jiov, syscall.Iovec{
+ Base: (*byte)(unsafe.Pointer(&v)),
+ Len: 4,
+ })
+ case NS:
+ jiov = append(jiov, syscall.Iovec{
+ Base: (*byte)(unsafe.Pointer(&v)),
+ Len: 4,
+ })
+ default:
+ jiov = append(jiov, syscall.Iovec{
+ Base: nil,
+ Len: 0,
+ })
+ }
+ }
+ return jiov, nil
+}
+
+type jail struct {
+ jid int32
+}
+
+func jailSet(jconf *config, flags int) (*jail, error) {
+ jiov, err := jconf.getIovec()
+ if err != nil {
+ return nil, err
+ }
+
+ jid, _, errno := syscall.Syscall(unix.SYS_JAIL_SET, uintptr(unsafe.Pointer(&jiov[0])), uintptr(len(jiov)), uintptr(flags))
+ if errno != 0 {
+ return nil, errno
+ }
+ return &jail{
+ jid: int32(jid),
+ }, nil
+}
+
+func jailGet(jconf *config, flags int) (*jail, error) {
+ jiov, err := jconf.getIovec()
+ if err != nil {
+ return nil, err
+ }
+
+ jid, _, errno := syscall.Syscall(unix.SYS_JAIL_GET, uintptr(unsafe.Pointer(&jiov[0])), uintptr(len(jiov)), uintptr(flags))
+ if errno != 0 {
+ return nil, errno
+ }
+ return &jail{
+ jid: int32(jid),
+ }, nil
+}
+
+func Create(jconf *config) (*jail, error) {
+ return jailSet(jconf, JAIL_CREATE)
+}
+
+func CreateAndAttach(jconf *config) (*jail, error) {
+ return jailSet(jconf, JAIL_CREATE|JAIL_ATTACH)
+}
+
+func FindByName(name string) (*jail, error) {
+ jconf := NewConfig()
+ jconf.Set("name", name)
+ return jailGet(jconf, 0)
+}
+
+func (j *jail) Set(jconf *config) error {
+ jconf.Set("jid", j.jid)
+ _, err := jailSet(jconf, JAIL_UPDATE)
+ return err
+}
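
For context, a hedged sketch of how a FreeBSD caller might drive this new package; the jail name and path are hypothetical:

	//go:build freebsd
	// +build freebsd

	package main

	import (
		"log"

		"github.com/containers/buildah/pkg/jail"
	)

	func main() {
		jconf := jail.NewConfig()
		jconf.Set("name", "buildah-demo") // hypothetical jail name
		jconf.Set("path", "/tmp/rootfs")  // hypothetical mountpoint
		jconf.Set("persist", false)       // stored as "nopersist" (see handleBoolSetting)
		jconf.Set("host", jail.INHERIT)
		if _, err := jail.CreateAndAttach(jconf); err != nil {
			log.Fatalf("creating jail: %v", err)
		}
	}
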
diff --git a/vendor/github.com/containers/buildah/pkg/jail/jail_int32.go b/vendor/github.com/containers/buildah/pkg/jail/jail_int32.go
new file mode 100644
index 000000000..3e56bb62c
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/jail/jail_int32.go
@@ -0,0 +1,20 @@
+//go:build (386 || arm) && freebsd
+// +build 386 arm
+// +build freebsd
+
+package jail
+
+import (
+ "syscall"
+)
+
+func stringToIovec(val string) (syscall.Iovec, error) {
+ bs, err := syscall.ByteSliceFromString(val)
+ if err != nil {
+ return syscall.Iovec{}, err
+ }
+ var res syscall.Iovec
+ res.Base = &bs[0]
+ res.Len = uint32(len(bs))
+ return res, nil
+}
diff --git a/vendor/github.com/containers/buildah/pkg/jail/jail_int64.go b/vendor/github.com/containers/buildah/pkg/jail/jail_int64.go
new file mode 100644
index 000000000..dace13fbd
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/jail/jail_int64.go
@@ -0,0 +1,19 @@
+//go:build !(386 || arm) && freebsd
+// +build !386,!arm,freebsd
+
+package jail
+
+import (
+ "syscall"
+)
+
+func stringToIovec(val string) (syscall.Iovec, error) {
+ bs, err := syscall.ByteSliceFromString(val)
+ if err != nil {
+ return syscall.Iovec{}, err
+ }
+ var res syscall.Iovec
+ res.Base = &bs[0]
+ res.Len = uint64(len(bs))
+ return res, nil
+}
diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
index acd7bccdc..6ab10b13c 100644
--- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
+++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
@@ -9,11 +9,12 @@ import (
"strings"
"syscall"
+ "errors"
+
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -56,12 +57,12 @@ type Options struct {
func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
contentDir := filepath.Join(containerDir, "overlay")
if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil {
- return "", errors.Wrapf(err, "failed to create the overlay %s directory", contentDir)
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err)
}
contentDir, err := ioutil.TempDir(contentDir, "")
if err != nil {
- return "", errors.Wrapf(err, "failed to create the overlay tmpdir in %s directory", contentDir)
+ return "", fmt.Errorf("failed to create the overlay tmpdir in %s directory: %w", contentDir, err)
}
return generateOverlayStructure(contentDir, rootUID, rootGID)
@@ -71,7 +72,7 @@ func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
func GenerateStructure(containerDir, containerID, name string, rootUID, rootGID int) (string, error) {
contentDir := filepath.Join(containerDir, "overlay-containers", containerID, name)
if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil {
- return "", errors.Wrapf(err, "failed to create the overlay %s directory", contentDir)
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err)
}
return generateOverlayStructure(contentDir, rootUID, rootGID)
@@ -82,14 +83,14 @@ func generateOverlayStructure(containerDir string, rootUID, rootGID int) (string
upperDir := filepath.Join(containerDir, "upper")
workDir := filepath.Join(containerDir, "work")
if err := idtools.MkdirAllAs(upperDir, 0700, rootUID, rootGID); err != nil {
- return "", errors.Wrapf(err, "failed to create the overlay %s directory", upperDir)
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", upperDir, err)
}
if err := idtools.MkdirAllAs(workDir, 0700, rootUID, rootGID); err != nil {
- return "", errors.Wrapf(err, "failed to create the overlay %s directory", workDir)
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", workDir, err)
}
mergeDir := filepath.Join(containerDir, "merge")
if err := idtools.MkdirAllAs(mergeDir, 0700, rootUID, rootGID); err != nil {
- return "", errors.Wrapf(err, "failed to create the overlay %s directory", mergeDir)
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", mergeDir, err)
}
return containerDir, nil
@@ -141,7 +142,7 @@ func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error
cmd := exec.Command(mountProgram, "-o", overlayOptions, mergeDir)
if err := cmd.Run(); err != nil {
- return errors.Wrapf(err, "exec %s", mountProgram)
+ return fmt.Errorf("exec %s: %w", mountProgram, err)
}
return nil
}
@@ -238,7 +239,7 @@ func Unmount(contentDir string) error {
// If they fail, fallback to unix.Unmount
for _, v := range []string{"fusermount3", "fusermount"} {
err := exec.Command(v, "-u", mergeDir).Run()
- if err != nil && errors.Cause(err) != exec.ErrNotFound {
+ if err != nil && !errors.Is(err, exec.ErrNotFound) {
logrus.Debugf("Error unmounting %s with %s - %v", mergeDir, v, err)
}
if err == nil {
@@ -250,7 +251,7 @@ func Unmount(contentDir string) error {
// Ignore EINVAL as the specified merge dir is not a mount point
if err := unix.Unmount(mergeDir, 0); err != nil && !os.IsNotExist(err) && err != unix.EINVAL {
- return errors.Wrapf(err, "unmount overlay %s", mergeDir)
+ return fmt.Errorf("unmount overlay %s: %w", mergeDir, err)
}
return nil
}
@@ -261,15 +262,15 @@ func recreate(contentDir string) error {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrap(err, "failed to stat overlay upper directory")
+ return fmt.Errorf("failed to stat overlay upper directory: %w", err)
}
if err := os.RemoveAll(contentDir); err != nil {
- return errors.WithStack(err)
+ return err
}
if err := idtools.MkdirAllAs(contentDir, os.FileMode(st.Mode()), int(st.UID()), int(st.GID())); err != nil {
- return errors.Wrap(err, "failed to create overlay directory")
+ return fmt.Errorf("failed to create overlay directory: %w", err)
}
return nil
}
@@ -295,7 +296,7 @@ func CleanupContent(containerDir string) (Err error) {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrap(err, "read directory")
+ return fmt.Errorf("read directory: %w", err)
}
for _, f := range files {
dir := filepath.Join(contentDir, f.Name())
@@ -305,7 +306,7 @@ func CleanupContent(containerDir string) (Err error) {
}
if err := os.RemoveAll(contentDir); err != nil && !os.IsNotExist(err) {
- return errors.Wrap(err, "failed to cleanup overlay directory")
+ return fmt.Errorf("failed to cleanup overlay directory: %w", err)
}
return nil
}
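
The errors.Is(err, exec.ErrNotFound) check above works because the *exec.Error returned by Run when a binary is missing implements Unwrap, so the sentinel is reachable through the chain. A standalone sketch:

	package main

	import (
		"errors"
		"fmt"
		"os/exec"
	)

	func main() {
		err := exec.Command("fusermount3-does-not-exist", "-u", "/tmp/merge").Run()
		fmt.Println(errors.Is(err, exec.ErrNotFound)) // true when the binary is absent
	}
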
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index cdc421f97..26ee4da77 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -5,6 +5,7 @@ package parse
// would be useful to projects vendoring buildah
import (
+ "errors"
"fmt"
"net"
"os"
@@ -21,10 +22,10 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/unshare"
+ storageTypes "github.com/containers/storage/types"
units "github.com/docker/go-units"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/openshift/imagebuilder"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -65,7 +66,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
if memVal != "" {
memoryLimit, err = units.RAMInBytes(memVal)
if err != nil {
- return nil, errors.Wrapf(err, "invalid value for memory")
+ return nil, fmt.Errorf("invalid value for memory: %w", err)
}
}
@@ -76,7 +77,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
} else {
memorySwap, err = units.RAMInBytes(memSwapValue)
if err != nil {
- return nil, errors.Wrapf(err, "invalid value for memory-swap")
+ return nil, fmt.Errorf("invalid value for memory-swap: %w", err)
}
}
}
@@ -86,11 +87,11 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
addHost, _ := flags.GetStringSlice("add-host")
if len(addHost) > 0 {
if noHosts {
- return nil, errors.Errorf("--no-hosts and --add-host conflict, can not be used together")
+ return nil, errors.New("--no-hosts and --add-host conflict, can not be used together")
}
for _, host := range addHost {
if err := validateExtraHost(host); err != nil {
- return nil, errors.Wrapf(err, "invalid value for add-host")
+ return nil, fmt.Errorf("invalid value for add-host: %w", err)
}
}
}
@@ -105,7 +106,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
}
}
if noDNS && len(dnsServers) > 1 {
- return nil, errors.Errorf("invalid --dns, --dns=none may not be used with any other --dns options")
+ return nil, errors.New("invalid --dns, --dns=none may not be used with any other --dns options")
}
}
@@ -113,7 +114,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
if flags.Changed("dns-search") {
dnsSearch, _ = flags.GetStringSlice("dns-search")
if noDNS && len(dnsSearch) > 0 {
- return nil, errors.Errorf("invalid --dns-search, --dns-search may not be used with --dns=none")
+ return nil, errors.New("invalid --dns-search, --dns-search may not be used with --dns=none")
}
}
@@ -121,12 +122,12 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
if flags.Changed("dns-option") {
dnsOptions, _ = flags.GetStringSlice("dns-option")
if noDNS && len(dnsOptions) > 0 {
- return nil, errors.Errorf("invalid --dns-option, --dns-option may not be used with --dns=none")
+ return nil, errors.New("invalid --dns-option, --dns-option may not be used with --dns=none")
}
}
if _, err := units.FromHumanSize(findFlagFunc("shm-size").Value.String()); err != nil {
- return nil, errors.Wrapf(err, "invalid --shm-size")
+ return nil, fmt.Errorf("invalid --shm-size: %w", err)
}
volumes, _ := flags.GetStringArray("volume")
if err := Volumes(volumes); err != nil {
@@ -146,6 +147,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
secrets, _ := flags.GetStringArray("secret")
sshsources, _ := flags.GetStringArray("ssh")
+ ociHooks, _ := flags.GetStringArray("hooks-dir")
commonOpts := &define.CommonBuildOptions{
AddHost: addHost,
@@ -169,6 +171,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
Volumes: volumes,
Secrets: secrets,
SSHSources: sshsources,
+ OCIHooksDir: ociHooks,
}
securityOpts, _ := flags.GetStringArray("security-opt")
if err := parseSecurityOpts(securityOpts, commonOpts); err != nil {
@@ -195,7 +198,7 @@ func GetAdditionalBuildContext(value string) (define.AdditionalBuildContext, err
} else {
path, err := filepath.Abs(value)
if err != nil {
- return define.AdditionalBuildContext{}, errors.Wrapf(err, "unable to convert additional build-context %q path to absolute", value)
+ return define.AdditionalBuildContext{}, fmt.Errorf("unable to convert additional build-context %q path to absolute: %w", value, err)
}
ret.Value = path
}
@@ -205,11 +208,11 @@ func GetAdditionalBuildContext(value string) (define.AdditionalBuildContext, err
func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOptions) error {
for _, opt := range securityOpts {
if opt == "no-new-privileges" {
- return errors.Errorf("no-new-privileges is not supported")
+ return errors.New("no-new-privileges is not supported")
}
con := strings.SplitN(opt, "=", 2)
if len(con) != 2 {
- return errors.Errorf("invalid --security-opt name=value pair: %q", opt)
+ return fmt.Errorf("invalid --security-opt name=value pair: %q", opt)
}
switch con[0] {
@@ -220,7 +223,7 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti
case "seccomp":
commonOpts.SeccompProfilePath = con[1]
default:
- return errors.Errorf("invalid --security-opt 2: %q", opt)
+ return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
}
@@ -230,11 +233,11 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti
commonOpts.SeccompProfilePath = SeccompOverridePath
} else {
if !os.IsNotExist(err) {
- return errors.WithStack(err)
+ return err
}
if _, err := os.Stat(SeccompDefaultPath); err != nil {
if !os.IsNotExist(err) {
- return errors.WithStack(err)
+ return err
}
} else {
commonOpts.SeccompProfilePath = SeccompDefaultPath
@@ -289,10 +292,10 @@ func validateExtraHost(val string) error {
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
arr := strings.SplitN(val, ":", 2)
if len(arr) != 2 || len(arr[0]) == 0 {
- return errors.Errorf("bad format for add-host: %q", val)
+ return fmt.Errorf("bad format for add-host: %q", val)
}
if _, err := validateIPAddress(arr[1]); err != nil {
- return errors.Errorf("invalid IP address in add-host: %q", arr[1])
+ return fmt.Errorf("invalid IP address in add-host: %q", arr[1])
}
return nil
}
@@ -304,7 +307,7 @@ func validateIPAddress(val string) (string, error) {
if ip != nil {
return ip.String(), nil
}
- return "", errors.Errorf("%s is not an ip address", val)
+ return "", fmt.Errorf("%s is not an ip address", val)
}
// SystemContextFromOptions returns a SystemContext populated with values
@@ -393,7 +396,7 @@ func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name strin
return nil, err
}
if len(specs) == 0 || specs[0] == "" {
- return nil, errors.Errorf("unable to parse --platform value %v", specs)
+ return nil, fmt.Errorf("unable to parse --platform value %v", specs)
}
platform := specs[0]
os, arch, variant, err := Platform(platform)
@@ -401,7 +404,7 @@ func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name strin
return nil, err
}
if ctx.OSChoice != "" || ctx.ArchitectureChoice != "" || ctx.VariantChoice != "" {
- return nil, errors.Errorf("invalid --platform may not be used with --os, --arch, or --variant")
+ return nil, errors.New("invalid --platform may not be used with --os, --arch, or --variant")
}
ctx.OSChoice = os
ctx.ArchitectureChoice = arch
@@ -428,7 +431,7 @@ func PlatformFromOptions(c *cobra.Command) (os, arch string, err error) {
return "", "", err
}
if len(platforms) < 1 {
- return "", "", errors.Errorf("invalid platform syntax for --platform (use OS/ARCH[/VARIANT])")
+ return "", "", errors.New("invalid platform syntax for --platform (use OS/ARCH[/VARIANT])")
}
return platforms[0].OS, platforms[0].Arch, nil
}
@@ -458,14 +461,14 @@ func PlatformsFromOptions(c *cobra.Command) (platforms []struct{ OS, Arch, Varia
platforms = nil
platformSpecs, err := c.Flags().GetStringSlice("platform")
if err != nil {
- return nil, errors.Wrap(err, "unable to parse platform")
+ return nil, fmt.Errorf("unable to parse platform: %w", err)
}
if os != "" || arch != "" || variant != "" {
- return nil, errors.Errorf("invalid --platform may not be used with --os, --arch, or --variant")
+ return nil, fmt.Errorf("invalid --platform may not be used with --os, --arch, or --variant")
}
for _, pf := range platformSpecs {
if os, arch, variant, err = Platform(pf); err != nil {
- return nil, errors.Wrapf(err, "unable to parse platform %q", pf)
+ return nil, fmt.Errorf("unable to parse platform %q: %w", pf, err)
}
platforms = append(platforms, struct{ OS, Arch, Variant string }{os, arch, variant})
}
@@ -497,7 +500,7 @@ func Platform(platform string) (os, arch, variant string, err error) {
return Platform(DefaultPlatform())
}
}
- return "", "", "", errors.Errorf("invalid platform syntax for %q (use OS/ARCH[/VARIANT][,...])", platform)
+ return "", "", "", fmt.Errorf("invalid platform syntax for %q (use OS/ARCH[/VARIANT][,...])", platform)
}
func parseCreds(creds string) (string, string) {
@@ -526,7 +529,7 @@ func AuthConfig(creds string) (*types.DockerAuthConfig, error) {
fmt.Print("Password: ")
termPassword, err := term.ReadPassword(0)
if err != nil {
- return nil, errors.Wrapf(err, "could not read password from terminal")
+ return nil, fmt.Errorf("could not read password from terminal: %w", err)
}
password = string(termPassword)
}
@@ -567,7 +570,7 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
switch arr[0] {
case "type":
if typeSelected {
- return define.BuildOutputOption{}, fmt.Errorf("Duplicate %q not supported", arr[0])
+ return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", arr[0])
}
typeSelected = true
if arr[1] == "local" {
@@ -579,12 +582,12 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
}
case "dest":
if pathSelected {
- return define.BuildOutputOption{}, fmt.Errorf("Duplicate %q not supported", arr[0])
+ return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", arr[0])
}
pathSelected = true
path = arr[1]
default:
- return define.BuildOutputOption{}, fmt.Errorf("Unrecognized key %q in build output option: %q", arr[0], buildOutput)
+ return define.BuildOutputOption{}, fmt.Errorf("unrecognized key %q in build output option: %q", arr[0], buildOutput)
}
}
@@ -609,8 +612,52 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio
return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag)
}
+// GetAutoOptions returns an AutoUserNsOptions with the settings needed to
+// automatically set up a user namespace.
+func GetAutoOptions(base string) (*storageTypes.AutoUserNsOptions, error) {
+ parts := strings.SplitN(base, ":", 2)
+ if parts[0] != "auto" {
+ return nil, errors.New("wrong user namespace mode")
+ }
+ options := storageTypes.AutoUserNsOptions{}
+ if len(parts) == 1 {
+ return &options, nil
+ }
+ for _, o := range strings.Split(parts[1], ",") {
+ v := strings.SplitN(o, "=", 2)
+ if len(v) != 2 {
+ return nil, fmt.Errorf("invalid option specified: %q", o)
+ }
+ switch v[0] {
+ case "size":
+ s, err := strconv.ParseUint(v[1], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ options.Size = uint32(s)
+ case "uidmapping":
+ mapping, err := storageTypes.ParseIDMapping([]string{v[1]}, nil, "", "")
+ if err != nil {
+ return nil, err
+ }
+ options.AdditionalUIDMappings = append(options.AdditionalUIDMappings, mapping.UIDMap...)
+ case "gidmapping":
+ mapping, err := storageTypes.ParseIDMapping(nil, []string{v[1]}, "", "")
+ if err != nil {
+ return nil, err
+ }
+ options.AdditionalGIDMappings = append(options.AdditionalGIDMappings, mapping.GIDMap...)
+ default:
+ return nil, fmt.Errorf("unknown option specified: %q", v[0])
+ }
+ }
+ return &options, nil
+}
+
// IDMappingOptionsFromFlagSet parses the build options related to user namespaces and ID mapping.
func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) {
+ isAuto := false
+ autoOpts := &storageTypes.AutoUserNsOptions{}
user := findFlagFunc("userns-uid-map-user").Value.String()
group := findFlagFunc("userns-gid-map-group").Value.String()
// If only the user or group was specified, use the same value for the
@@ -693,18 +740,27 @@ func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.Fl
// user namespaces, override that default.
if findFlagFunc("userns").Changed {
how := findFlagFunc("userns").Value.String()
- switch how {
- case "", "container", "private":
+ if strings.HasPrefix(how, "auto") {
+ autoOpts, err = GetAutoOptions(how)
+ if err != nil {
+ return nil, nil, err
+ }
+ isAuto = true
usernsOption.Host = false
- case "host":
- usernsOption.Host = true
- default:
- how = strings.TrimPrefix(how, "ns:")
- if _, err := os.Stat(how); err != nil {
- return nil, nil, errors.Wrapf(err, "checking %s namespace", string(specs.UserNamespace))
+ } else {
+ switch how {
+ case "", "container", "private":
+ usernsOption.Host = false
+ case "host":
+ usernsOption.Host = true
+ default:
+ how = strings.TrimPrefix(how, "ns:")
+ if _, err := os.Stat(how); err != nil {
+ return nil, nil, fmt.Errorf("checking %s namespace: %w", string(specs.UserNamespace), err)
+ }
+ logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how)
+ usernsOption.Path = how
}
- logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how)
- usernsOption.Path = how
}
}
usernsOptions = define.NamespaceOptions{usernsOption}
@@ -712,13 +768,15 @@ func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.Fl
// If the user requested that we use the host namespace, but also that
// we use mappings, that's not going to work.
if (len(uidmap) != 0 || len(gidmap) != 0) && usernsOption.Host {
- return nil, nil, errors.Errorf("can not specify ID mappings while using host's user namespace")
+ return nil, nil, fmt.Errorf("can not specify ID mappings while using host's user namespace")
}
return usernsOptions, &define.IDMappingOptions{
HostUIDMapping: usernsOption.Host,
HostGIDMapping: usernsOption.Host,
UIDMap: uidmap,
GIDMap: gidmap,
+ AutoUserNs: isAuto,
+ AutoUserNsOpts: *autoOpts,
}, nil
}
@@ -726,20 +784,20 @@ func parseIDMap(spec []string) (m [][3]uint32, err error) {
for _, s := range spec {
args := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsDigit(r) })
if len(args)%3 != 0 {
- return nil, errors.Errorf("mapping %q is not in the form containerid:hostid:size[,...]", s)
+ return nil, fmt.Errorf("mapping %q is not in the form containerid:hostid:size[,...]", s)
}
for len(args) >= 3 {
cid, err := strconv.ParseUint(args[0], 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing container ID %q from mapping %q as a number", args[0], s)
+ return nil, fmt.Errorf("error parsing container ID %q from mapping %q as a number: %w", args[0], s, err)
}
hostid, err := strconv.ParseUint(args[1], 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing host ID %q from mapping %q as a number", args[1], s)
+ return nil, fmt.Errorf("error parsing host ID %q from mapping %q as a number: %w", args[1], s, err)
}
size, err := strconv.ParseUint(args[2], 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing %q from mapping %q as a number", args[2], s)
+ return nil, fmt.Errorf("error parsing %q from mapping %q as a number: %w", args[2], s, err)
}
m = append(m, [3]uint32{uint32(cid), uint32(hostid), uint32(size)})
args = args[3:]
@@ -793,7 +851,7 @@ func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name st
// if not a path we assume it is a comma separated network list, see setupNamespaces() in run_linux.go
if filepath.IsAbs(how) || what != string(specs.NetworkNamespace) {
if _, err := os.Stat(how); err != nil {
- return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what)
+ return nil, define.NetworkDefault, fmt.Errorf("checking %s namespace: %w", what, err)
}
}
policy = define.NetworkEnabled
@@ -819,7 +877,7 @@ func defaultIsolation() (define.Isolation, error) {
case "chroot":
return define.IsolationChroot, nil
default:
- return 0, errors.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation)
+ return 0, fmt.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation)
}
}
if unshare.IsRootless() {
@@ -839,7 +897,7 @@ func IsolationOption(isolation string) (define.Isolation, error) {
case "chroot":
return define.IsolationChroot, nil
default:
- return 0, errors.Errorf("unrecognized isolation type %q", isolation)
+ return 0, fmt.Errorf("unrecognized isolation type %q", isolation)
}
}
return defaultIsolation()
@@ -859,7 +917,7 @@ func Device(device string) (string, string, string, error) {
switch len(arr) {
case 3:
if !isValidDeviceMode(arr[2]) {
- return "", "", "", errors.Errorf("invalid device mode: %s", arr[2])
+ return "", "", "", fmt.Errorf("invalid device mode: %s", arr[2])
}
permissions = arr[2]
fallthrough
@@ -868,7 +926,7 @@ func Device(device string) (string, string, string, error) {
permissions = arr[1]
} else {
if len(arr[1]) == 0 || arr[1][0] != '/' {
- return "", "", "", errors.Errorf("invalid device mode: %s", arr[1])
+ return "", "", "", fmt.Errorf("invalid device mode: %s", arr[1])
}
dst = arr[1]
}
@@ -880,7 +938,7 @@ func Device(device string) (string, string, string, error) {
}
fallthrough
default:
- return "", "", "", errors.Errorf("invalid device specification: %s", device)
+ return "", "", "", fmt.Errorf("invalid device specification: %s", device)
}
if dst == "" {
@@ -918,7 +976,7 @@ func GetTempDir() string {
// Secrets parses the --secret flag
func Secrets(secrets []string) (map[string]define.Secret, error) {
- invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]")
+ invalidSyntax := fmt.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]")
parsed := make(map[string]define.Secret)
for _, secret := range secrets {
tokens := strings.Split(secret, ",")
@@ -957,11 +1015,11 @@ func Secrets(secrets []string) (map[string]define.Secret, error) {
if typ == "file" {
fullPath, err := filepath.Abs(src)
if err != nil {
- return nil, errors.Wrap(err, "could not parse secrets")
+ return nil, fmt.Errorf("could not parse secrets: %w", err)
}
_, err = os.Stat(fullPath)
if err != nil {
- return nil, errors.Wrap(err, "could not parse secrets")
+ return nil, fmt.Errorf("could not parse secrets: %w", err)
}
src = fullPath
}
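
A hedged usage sketch of the new GetAutoOptions (values are hypothetical; assumes the vendored package path used throughout this diff):

	package main

	import (
		"fmt"
		"log"

		"github.com/containers/buildah/pkg/parse"
	)

	func main() {
		// "auto" alone accepts the defaults; options follow a colon.
		opts, err := parse.GetAutoOptions("auto:size=8192,uidmapping=0:100000:8192")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(opts.Size, opts.AdditionalUIDMappings)
	}
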
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
index dcee5ca6f..a8b1d1a9a 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
@@ -1,14 +1,15 @@
+//go:build linux || darwin
// +build linux darwin
package parse
import (
+ "fmt"
"os"
"path/filepath"
"github.com/containers/buildah/define"
"github.com/opencontainers/runc/libcontainer/devices"
- "github.com/pkg/errors"
)
func DeviceFromPath(device string) (define.ContainerDevices, error) {
@@ -19,13 +20,13 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) {
}
srcInfo, err := os.Stat(src)
if err != nil {
- return nil, errors.Wrapf(err, "error getting info of source device %s", src)
+ return nil, fmt.Errorf("error getting info of source device %s: %w", src, err)
}
if !srcInfo.IsDir() {
dev, err := devices.DeviceFromPath(src, permissions)
if err != nil {
- return nil, errors.Wrapf(err, "%s is not a valid device", src)
+ return nil, fmt.Errorf("%s is not a valid device: %w", src, err)
}
dev.Path = dst
device := define.BuildahDevice{Device: *dev, Source: src, Destination: dst}
@@ -36,7 +37,7 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) {
// If source device is a directory
srcDevices, err := devices.GetDevices(src)
if err != nil {
- return nil, errors.Wrapf(err, "error getting source devices from directory %s", src)
+ return nil, fmt.Errorf("error getting source devices from directory %s: %w", src, err)
}
for _, d := range srcDevices {
d.Path = filepath.Join(dst, filepath.Base(d.Path))
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go
index c48b24884..e3d3a71b8 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go
@@ -1,10 +1,12 @@
+//go:build !linux && !darwin
// +build !linux,!darwin
package parse
import (
+ "errors"
+
"github.com/containers/buildah/define"
- "github.com/pkg/errors"
)
func getDefaultProcessLimits() []string {
@@ -12,5 +14,5 @@ func getDefaultProcessLimits() []string {
}
func DeviceFromPath(device string) (define.ContainerDevices, error) {
- return nil, errors.Errorf("devices not supported")
+ return nil, errors.New("devices not supported")
}
diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go
index 5bfed45c1..e0b9d37b3 100644
--- a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go
+++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go
@@ -1,12 +1,12 @@
+//go:build !windows
// +build !windows
package rusage
import (
+ "fmt"
"syscall"
"time"
-
- "github.com/pkg/errors"
)
func mkduration(tv syscall.Timeval) time.Duration {
@@ -17,7 +17,7 @@ func get() (Rusage, error) {
var rusage syscall.Rusage
err := syscall.Getrusage(syscall.RUSAGE_CHILDREN, &rusage)
if err != nil {
- return Rusage{}, errors.Wrapf(err, "error getting resource usage")
+ return Rusage{}, fmt.Errorf("error getting resource usage: %w", err)
}
r := Rusage{
Date: time.Now(),
diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go
index 031c81402..46dd5ebe7 100644
--- a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go
+++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go
@@ -1,15 +1,15 @@
+//go:build windows
// +build windows
package rusage
import (
+ "fmt"
"syscall"
-
- "github.com/pkg/errors"
)
func get() (Rusage, error) {
- return Rusage{}, errors.Wrapf(syscall.ENOTSUP, "error getting resource usage")
+ return Rusage{}, fmt.Errorf("error getting resource usage: %w", syscall.ENOTSUP)
}
// Supported returns true if resource usage counters are supported on this OS.
diff --git a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go
index b985cd2b0..7a6563026 100644
--- a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go
+++ b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go
@@ -1,6 +1,8 @@
package sshagent
import (
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"net"
@@ -10,7 +12,6 @@ import (
"time"
"github.com/opencontainers/selinux/go-selinux"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
@@ -40,7 +41,7 @@ func newAgentServerKeyring(keys []interface{}) (*AgentServer, error) {
a := agent.NewKeyring()
for _, k := range keys {
if err := a.Add(agent.AddedKey{PrivateKey: k}); err != nil {
- return nil, errors.Wrap(err, "failed to create ssh agent")
+ return nil, fmt.Errorf("failed to create ssh agent: %w", err)
}
}
return &AgentServer{
@@ -216,7 +217,7 @@ func NewSource(paths []string) (*Source, error) {
k, err := ssh.ParseRawPrivateKey(dt)
if err != nil {
- return nil, errors.Wrapf(err, "cannot parse ssh key")
+ return nil, fmt.Errorf("cannot parse ssh key: %w", err)
}
keys = append(keys, k)
}
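
For context, a standalone sketch of the keyring pattern newAgentServerKeyring uses above, substituting a freshly generated ed25519 key for parsed PEM bytes:

	package main

	import (
		"crypto/ed25519"
		"crypto/rand"
		"log"

		"golang.org/x/crypto/ssh/agent"
	)

	func main() {
		_, priv, err := ed25519.GenerateKey(rand.Reader)
		if err != nil {
			log.Fatal(err)
		}
		keyring := agent.NewKeyring()
		if err := keyring.Add(agent.AddedKey{PrivateKey: priv}); err != nil {
			log.Fatalf("failed to create ssh agent: %v", err)
		}
		keys, _ := keyring.List()
		log.Printf("agent now serves %d key(s)", len(keys))
	}
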
diff --git a/vendor/github.com/containers/buildah/pkg/util/util.go b/vendor/github.com/containers/buildah/pkg/util/util.go
index 209ad9544..20e9ede43 100644
--- a/vendor/github.com/containers/buildah/pkg/util/util.go
+++ b/vendor/github.com/containers/buildah/pkg/util/util.go
@@ -1,12 +1,11 @@
package util
import (
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
-
- "github.com/pkg/errors"
)
// Mirrors path to a tmpfile if path points to a
@@ -44,7 +43,7 @@ func DiscoverContainerfile(path string) (foundCtrFile string, err error) {
// Test for existence of the file
target, err := os.Stat(path)
if err != nil {
- return "", errors.Wrap(err, "discovering Containerfile")
+ return "", fmt.Errorf("discovering Containerfile: %w", err)
}
switch mode := target.Mode(); {
@@ -61,7 +60,7 @@ func DiscoverContainerfile(path string) (foundCtrFile string, err error) {
// Test for existence of the Dockerfile file
file, err = os.Stat(ctrfile)
if err != nil {
- return "", errors.Wrap(err, "cannot find Containerfile or Dockerfile in context directory")
+ return "", fmt.Errorf("cannot find Containerfile or Dockerfile in context directory: %w", err)
}
}
@@ -69,7 +68,7 @@ func DiscoverContainerfile(path string) (foundCtrFile string, err error) {
if mode := file.Mode(); mode.IsRegular() {
foundCtrFile = ctrfile
} else {
- return "", errors.Errorf("assumed Containerfile %q is not a file", ctrfile)
+ return "", fmt.Errorf("assumed Containerfile %q is not a file", ctrfile)
}
case mode.IsRegular():
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index d203e6065..343c61fba 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -2,6 +2,7 @@ package buildah
import (
"context"
+ "fmt"
"io"
"time"
@@ -11,7 +12,6 @@ import (
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
- "github.com/pkg/errors"
)
// PullOptions can be used to alter how an image is copied in from somewhere.
@@ -69,7 +69,6 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s
libimageOptions.MaxRetries = &retries
}
-
pullPolicy, err := config.ParsePullPolicy(options.PullPolicy.String())
if err != nil {
return "", err
@@ -94,7 +93,7 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s
}
if len(pulledImages) == 0 {
- return "", errors.Errorf("internal error pulling %s: no image pulled and no error", imageName)
+ return "", fmt.Errorf("internal error pulling %s: no image pulled and no error", imageName)
}
return pulledImages[0].ID(), nil
diff --git a/vendor/github.com/containers/buildah/push.go b/vendor/github.com/containers/buildah/push.go
index 65bda9ca0..a161bb279 100644
--- a/vendor/github.com/containers/buildah/push.go
+++ b/vendor/github.com/containers/buildah/push.go
@@ -17,7 +17,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -33,7 +32,7 @@ func cacheLookupReferenceFunc(directory string, compress types.LayerCompression)
}
ref, err := blobcache.NewBlobCache(ref, directory, compress)
if err != nil {
- return nil, errors.Wrapf(err, "error using blobcache %q", directory)
+ return nil, fmt.Errorf("error using blobcache %q: %w", directory, err)
}
return ref, nil
}
@@ -136,7 +135,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
- return nil, "", errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest))
+ return nil, "", fmt.Errorf("error computing digest of manifest of new image %q: %w", transports.ImageName(dest), err)
}
var ref reference.Canonical
diff --git a/vendor/github.com/containers/buildah/run_common.go b/vendor/github.com/containers/buildah/run_common.go
new file mode 100644
index 000000000..b50afec0b
--- /dev/null
+++ b/vendor/github.com/containers/buildah/run_common.go
@@ -0,0 +1,1881 @@
+//go:build linux || freebsd
+// +build linux freebsd
+
+package buildah
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "os/exec"
+ "os/signal"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/containers/buildah/bind"
+ "github.com/containers/buildah/copier"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/internal"
+ internalParse "github.com/containers/buildah/internal/parse"
+ internalUtil "github.com/containers/buildah/internal/util"
+ "github.com/containers/buildah/pkg/overlay"
+ "github.com/containers/buildah/pkg/sshagent"
+ "github.com/containers/buildah/util"
+ "github.com/containers/common/libnetwork/etchosts"
+ "github.com/containers/common/libnetwork/network"
+ "github.com/containers/common/libnetwork/resolvconf"
+ netTypes "github.com/containers/common/libnetwork/types"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/subscriptions"
+ imageTypes "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/lockfile"
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/containers/storage/pkg/unshare"
+ storageTypes "github.com/containers/storage/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+ "golang.org/x/term"
+)
+
+// addResolvConf copies files from the host and sets them up to bind mount into the container
+func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string, namespaces []specs.LinuxNamespace) (string, error) {
+ defaultConfig, err := config.Default()
+ if err != nil {
+ return "", fmt.Errorf("failed to get config: %w", err)
+ }
+
+ nameservers := make([]string, 0, len(defaultConfig.Containers.DNSServers)+len(dnsServers))
+ nameservers = append(nameservers, defaultConfig.Containers.DNSServers...)
+ nameservers = append(nameservers, dnsServers...)
+
+ keepHostServers := false
+ // special check for slirp ip
+ if len(nameservers) == 0 && b.Isolation == IsolationOCIRootless {
+ for _, ns := range namespaces {
+ if ns.Type == specs.NetworkNamespace && ns.Path == "" {
+ keepHostServers = true
+ // if we are using slirp4netns, also add the built-in DNS server.
+ logrus.Debugf("adding slirp4netns 10.0.2.3 built-in DNS server")
+ nameservers = append([]string{"10.0.2.3"}, nameservers...)
+ }
+ }
+ }
+
+ searches := make([]string, 0, len(defaultConfig.Containers.DNSSearches)+len(dnsSearch))
+ searches = append(searches, defaultConfig.Containers.DNSSearches...)
+ searches = append(searches, dnsSearch...)
+
+ options := make([]string, 0, len(defaultConfig.Containers.DNSOptions)+len(dnsOptions))
+ options = append(options, defaultConfig.Containers.DNSOptions...)
+ options = append(options, dnsOptions...)
+
+ cfile := filepath.Join(rdir, "resolv.conf")
+ if err := resolvconf.New(&resolvconf.Params{
+ Path: cfile,
+ Namespaces: namespaces,
+ IPv6Enabled: true, // TODO we should check if we have ipv6
+ KeepHostServers: keepHostServers,
+ Nameservers: nameservers,
+ Searches: searches,
+ Options: options,
+ }); err != nil {
+ return "", fmt.Errorf("error building resolv.conf for container %s: %w", b.ContainerID, err)
+ }
+
+ uid := 0
+ gid := 0
+ if chownOpts != nil {
+ uid = chownOpts.UID
+ gid = chownOpts.GID
+ }
+ if err = os.Chown(cfile, uid, gid); err != nil {
+ return "", err
+ }
+
+ if err := label.Relabel(cfile, b.MountLabel, false); err != nil {
+ return "", err
+ }
+ return cfile, nil
+}
+
+// generateHosts creates a container's hosts file
+func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoot string) (string, error) {
+ conf, err := config.Default()
+ if err != nil {
+ return "", err
+ }
+
+ path, err := etchosts.GetBaseHostFile(conf.Containers.BaseHostsFile, imageRoot)
+ if err != nil {
+ return "", err
+ }
+
+ targetfile := filepath.Join(rdir, "hosts")
+ if err := etchosts.New(&etchosts.Params{
+ BaseFile: path,
+ ExtraHosts: b.CommonBuildOpts.AddHost,
+ HostContainersInternalIP: etchosts.GetHostContainersInternalIP(conf, nil, nil),
+ TargetFile: targetfile,
+ }); err != nil {
+ return "", err
+ }
+
+ uid := 0
+ gid := 0
+ if chownOpts != nil {
+ uid = chownOpts.UID
+ gid = chownOpts.GID
+ }
+ if err = os.Chown(targetfile, uid, gid); err != nil {
+ return "", err
+ }
+ if err := label.Relabel(targetfile, b.MountLabel, false); err != nil {
+ return "", err
+ }
+
+ return targetfile, nil
+}
+
+// generateHostname creates a container's /etc/hostname file
+func (b *Builder) generateHostname(rdir, hostname string, chownOpts *idtools.IDPair) (string, error) {
+ var err error
+ hostnamePath := "/etc/hostname"
+
+ var hostnameBuffer bytes.Buffer
+ hostnameBuffer.Write([]byte(fmt.Sprintf("%s\n", hostname)))
+
+ cfile := filepath.Join(rdir, filepath.Base(hostnamePath))
+ if err = ioutils.AtomicWriteFile(cfile, hostnameBuffer.Bytes(), 0644); err != nil {
+ return "", fmt.Errorf("error writing /etc/hostname into the container: %w", err)
+ }
+
+ uid := 0
+ gid := 0
+ if chownOpts != nil {
+ uid = chownOpts.UID
+ gid = chownOpts.GID
+ }
+ if err = os.Chown(cfile, uid, gid); err != nil {
+ return "", err
+ }
+ if err := label.Relabel(cfile, b.MountLabel, false); err != nil {
+ return "", err
+ }
+
+ return cfile, nil
+}
+
+func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, terminalSize *specs.Box) {
+ switch terminalPolicy {
+ case DefaultTerminal:
+ onTerminal := term.IsTerminal(unix.Stdin) && term.IsTerminal(unix.Stdout) && term.IsTerminal(unix.Stderr)
+ if onTerminal {
+ logrus.Debugf("stdio is a terminal, defaulting to using a terminal")
+ } else {
+ logrus.Debugf("stdio is not a terminal, defaulting to not using a terminal")
+ }
+ g.SetProcessTerminal(onTerminal)
+ case WithTerminal:
+ g.SetProcessTerminal(true)
+ case WithoutTerminal:
+ g.SetProcessTerminal(false)
+ }
+ if terminalSize != nil {
+ g.SetProcessConsoleSize(terminalSize.Width, terminalSize.Height)
+ }
+}
+
+// Search for a command that isn't given as an absolute path using the $PATH
+// under the rootfs. We can't resolve absolute symbolic links without
+// chroot()ing, which we may not be able to do, so just accept a link as a
+// valid resolution.
+func runLookupPath(g *generate.Generator, command []string) []string {
+ // Look for the configured $PATH.
+ spec := g.Config
+ envPath := ""
+ for i := range spec.Process.Env {
+ if strings.HasPrefix(spec.Process.Env[i], "PATH=") {
+ envPath = spec.Process.Env[i]
+ }
+ }
+ // If there is no configured $PATH, supply one.
+ if envPath == "" {
+ defaultPath := "/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin"
+ envPath = "PATH=" + defaultPath
+ g.AddProcessEnv("PATH", defaultPath)
+ }
+ // No command, nothing to do.
+ if len(command) == 0 {
+ return command
+ }
+ // Command is already an absolute path, use it as-is.
+ if filepath.IsAbs(command[0]) {
+ return command
+ }
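+ // Note: envPath has the form "PATH=<list>", so envPath[5:] below strips the "PATH=" prefix before splitting the list.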
+ // For each element in the PATH,
+ for _, pathEntry := range filepath.SplitList(envPath[5:]) {
+ // if it's the empty string, it's ".", which is the Cwd,
+ if pathEntry == "" {
+ pathEntry = spec.Process.Cwd
+ }
+ // build the absolute path which it might be,
+ candidate := filepath.Join(pathEntry, command[0])
+ // check if it's there,
+ if fi, err := os.Lstat(filepath.Join(spec.Root.Path, candidate)); fi != nil && err == nil {
+ // and if it's not a directory, and either a symlink or executable,
+ if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0111 != 0)) {
+ // use that.
+ return append([]string{candidate}, command[1:]...)
+ }
+ }
+ }
+ return command
+}
+
+func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) (string, error) {
+ // Set the user UID/GID/supplemental group list/capabilities lists.
+ user, homeDir, err := b.userForRun(mountPoint, options.User)
+ if err != nil {
+ return "", err
+ }
+ if err := setupCapabilities(g, b.Capabilities, options.AddCapabilities, options.DropCapabilities); err != nil {
+ return "", err
+ }
+ g.SetProcessUID(user.UID)
+ g.SetProcessGID(user.GID)
+ for _, gid := range user.AdditionalGids {
+ g.AddProcessAdditionalGid(gid)
+ }
+
+ // Remove capabilities if not running as root except Bounding set
+ if user.UID != 0 && g.Config.Process.Capabilities != nil {
+ bounding := g.Config.Process.Capabilities.Bounding
+ g.ClearProcessCapabilities()
+ g.Config.Process.Capabilities.Bounding = bounding
+ }
+
+ return homeDir, nil
+}
+
+func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions, defaultEnv []string) {
+ g.ClearProcessEnv()
+
+ if b.CommonBuildOpts.HTTPProxy {
+ for _, envSpec := range config.ProxyEnv {
+ if envVal, ok := os.LookupEnv(envSpec); ok {
+ g.AddProcessEnv(envSpec, envVal)
+ }
+ }
+ }
+
+ for _, envSpec := range util.MergeEnv(util.MergeEnv(defaultEnv, b.Env()), options.Env) {
+ env := strings.SplitN(envSpec, "=", 2)
+ if len(env) > 1 {
+ g.AddProcessEnv(env[0], env[1])
+ }
+ }
+}
+
+// getNetworkInterface creates the network interface
+func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (netTypes.ContainerNetwork, error) {
+ conf, err := config.Default()
+ if err != nil {
+ return nil, err
+ }
+ // copy the config to not modify the default by accident
+ newconf := *conf
+ if len(cniConfDir) > 0 {
+ newconf.Network.NetworkConfigDir = cniConfDir
+ }
+ if len(cniPluginPath) > 0 {
+ plugins := strings.Split(cniPluginPath, string(os.PathListSeparator))
+ newconf.Network.CNIPluginDirs = plugins
+ }
+
+ _, netInt, err := network.NetworkBackend(store, &newconf, false)
+ if err != nil {
+ return nil, err
+ }
+ return netInt, nil
+}
+
+// DefaultNamespaceOptions returns the default namespace settings from the
+// runtime-tools generator library.
+func DefaultNamespaceOptions() (define.NamespaceOptions, error) {
+ cfg, err := config.Default()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get container config: %w", err)
+ }
+ options := define.NamespaceOptions{
+ {Name: string(specs.CgroupNamespace), Host: cfg.CgroupNS() == "host"},
+ {Name: string(specs.IPCNamespace), Host: cfg.IPCNS() == "host"},
+ {Name: string(specs.MountNamespace), Host: false},
+ {Name: string(specs.NetworkNamespace), Host: cfg.NetNS() == "host"},
+ {Name: string(specs.PIDNamespace), Host: cfg.PidNS() == "host"},
+ {Name: string(specs.UserNamespace), Host: cfg.Containers.UserNS == "host"},
+ {Name: string(specs.UTSNamespace), Host: cfg.UTSNS() == "host"},
+ }
+ return options, nil
+}
+
+func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOptions) error {
+ switch isolation {
+ case IsolationOCIRootless:
+ // only change the netns if the caller did not set it
+ if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns == nil {
+ if _, err := exec.LookPath("slirp4netns"); err != nil {
+ // if slirp4netns is not installed we have to use the hosts net namespace
+ options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.NetworkNamespace), Host: true})
+ }
+ }
+ fallthrough
+ case IsolationOCI:
+ pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace))
+ userns := options.NamespaceOptions.Find(string(specs.UserNamespace))
+ if (pidns != nil && pidns.Host) && (userns != nil && !userns.Host) {
+ return fmt.Errorf("not allowed to mix host PID namespace with container user namespace")
+ }
+ }
+ return nil
+}
+
+// fileCloser is a helper struct to prevent closing the file twice in the code
+// users must call (fileCloser).Close() and not fileCloser.file.Close()
+type fileCloser struct {
+ file *os.File
+ closed bool
+}
+
+func (f *fileCloser) Close() {
+ if !f.closed {
+ if err := f.file.Close(); err != nil {
+ logrus.Errorf("failed to close file: %v", err)
+ }
+ f.closed = true
+ }
+}
+
+// waitForSync waits for a maximum of 4 minutes to read something from the file
+func waitForSync(pipeR *os.File) error {
+ if err := pipeR.SetDeadline(time.Now().Add(4 * time.Minute)); err != nil {
+ return err
+ }
+ b := make([]byte, 16)
+ _, err := pipeR.Read(b)
+ return err
+}
+
+func contains(volumes []string, v string) bool {
+ for _, i := range volumes {
+ if i == v {
+ return true
+ }
+ }
+ return false
+}
+
+func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string,
+ containerCreateW io.WriteCloser, containerStartR io.ReadCloser) (wstatus unix.WaitStatus, err error) {
+ if options.Logger == nil {
+ options.Logger = logrus.StandardLogger()
+ }
+
+ // Lock the caller to a single OS-level thread.
+ runtime.LockOSThread()
+
+ // Set up bind mounts for things that a namespaced user might not be able to get to directly.
+ unmountAll, err := bind.SetupIntermediateMountNamespace(spec, bundlePath)
+ if unmountAll != nil {
+ defer func() {
+ if err := unmountAll(); err != nil {
+ options.Logger.Error(err)
+ }
+ }()
+ }
+ if err != nil {
+ return 1, err
+ }
+
+ // Write the runtime configuration.
+ specbytes, err := json.Marshal(spec)
+ if err != nil {
+ return 1, fmt.Errorf("error encoding configuration %#v as json: %w", spec, err)
+ }
+ if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
+ return 1, fmt.Errorf("error storing runtime configuration: %w", err)
+ }
+
+ logrus.Debugf("config = %v", string(specbytes))
+
+ // Decide which runtime to use.
+ runtime := options.Runtime
+ if runtime == "" {
+ runtime = util.Runtime()
+ }
+ localRuntime := util.FindLocalRuntime(runtime)
+ if localRuntime != "" {
+ runtime = localRuntime
+ }
+
+ // Default to just passing down our stdio.
+ getCreateStdio := func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) {
+ return os.Stdin, os.Stdout, os.Stderr
+ }
+
+ // Figure out how we're doing stdio handling, and create pipes and sockets.
+ var stdio sync.WaitGroup
+ var consoleListener *net.UnixListener
+ var errorFds, closeBeforeReadingErrorFds []int
+ stdioPipe := make([][]int, 3)
+ copyConsole := false
+ copyPipes := false
+ finishCopy := make([]int, 2)
+ if err = unix.Pipe(finishCopy); err != nil {
+ return 1, fmt.Errorf("error creating pipe for notifying to stop stdio: %w", err)
+ }
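+ // finishedCopy is buffered so the stdio-copying goroutine can signal completion without blocking.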
+ finishedCopy := make(chan struct{}, 1)
+ var pargs []string
+ if spec.Process != nil {
+ pargs = spec.Process.Args
+ if spec.Process.Terminal {
+ copyConsole = true
+ // Create a listening socket for accepting the container's terminal's PTY master.
+ socketPath := filepath.Join(bundlePath, "console.sock")
+ consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"})
+ if err != nil {
+ // use socketPath in the message: consoleListener is nil when ListenUnix fails
+ return 1, fmt.Errorf("error creating socket %q to receive terminal descriptor: %w", socketPath, err)
+ }
+ // Add console socket arguments.
+ moreCreateArgs = append(moreCreateArgs, "--console-socket", socketPath)
+ } else {
+ copyPipes = true
+ // Figure out who should own the pipes.
+ uid, gid, err := util.GetHostRootIDs(spec)
+ if err != nil {
+ return 1, err
+ }
+ // Create stdio pipes.
+ if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil {
+ return 1, err
+ }
+ if err = runLabelStdioPipes(stdioPipe, spec.Process.SelinuxLabel, spec.Linux.MountLabel); err != nil {
+ return 1, err
+ }
+ errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]}
+ closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]}
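+ // If "create" fails, runCollectOutput drains errorFds so any early output from the runtime ends up in the error message.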
+ // Set stdio to our pipes.
+ getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) {
+ stdin := os.NewFile(uintptr(stdioPipe[unix.Stdin][0]), "/dev/stdin")
+ stdout := os.NewFile(uintptr(stdioPipe[unix.Stdout][1]), "/dev/stdout")
+ stderr := os.NewFile(uintptr(stdioPipe[unix.Stderr][1]), "/dev/stderr")
+ return stdin, stdout, stderr
+ }
+ }
+ } else {
+ if options.Quiet {
+ // Discard stdout.
+ getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) {
+ return os.Stdin, nil, os.Stderr
+ }
+ }
+ }
+
+ runtimeArgs := options.Args[:]
+ if options.CgroupManager == config.SystemdCgroupsManager {
+ runtimeArgs = append(runtimeArgs, "--systemd-cgroup")
+ }
+
+ // Build the commands that we'll execute.
+ pidFile := filepath.Join(bundlePath, "pid")
+ args := append(append(append(runtimeArgs, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName)
+ create := exec.Command(runtime, args...)
+ setPdeathsig(create)
+ create.Dir = bundlePath
+ stdin, stdout, stderr := getCreateStdio()
+ create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr
+
+ args = append(options.Args, "start", containerName)
+ start := exec.Command(runtime, args...)
+ setPdeathsig(start)
+ start.Dir = bundlePath
+ start.Stderr = os.Stderr
+
+ kill := func(signal string) *exec.Cmd {
+ args := append(options.Args, "kill", containerName)
+ if signal != "" {
+ args = append(args, signal)
+ }
+ kill := exec.Command(runtime, args...)
+ kill.Dir = bundlePath
+ kill.Stderr = os.Stderr
+ return kill
+ }
+
+ args = append(options.Args, "delete", containerName)
+ del := exec.Command(runtime, args...)
+ del.Dir = bundlePath
+ del.Stderr = os.Stderr
+
+ // Actually create the container.
+ logrus.Debugf("Running %q", create.Args)
+ err = create.Run()
+ if err != nil {
+ return 1, fmt.Errorf("error from %s creating container for %v: %s: %w", runtime, pargs, runCollectOutput(options.Logger, errorFds, closeBeforeReadingErrorFds), err)
+ }
+ defer func() {
+ err2 := del.Run()
+ if err2 != nil {
+ if err == nil {
+ err = fmt.Errorf("error deleting container: %w", err2)
+ } else {
+ options.Logger.Infof("error from %s deleting container: %v", runtime, err2)
+ }
+ }
+ }()
+
+ // Make sure we read the container's exit status when it exits.
+ pidValue, err := ioutil.ReadFile(pidFile)
+ if err != nil {
+ return 1, err
+ }
+ pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue)))
+ if err != nil {
+ return 1, fmt.Errorf("error parsing pid %s as a number: %w", string(pidValue), err)
+ }
+ var stopped uint32
+ var reaping sync.WaitGroup
+ reaping.Add(1)
+ go func() {
+ defer reaping.Done()
+ var err error
+ _, err = unix.Wait4(pid, &wstatus, 0, nil)
+ if err != nil {
+ wstatus = 0
+ options.Logger.Errorf("error waiting for container child process %d: %v\n", pid, err)
+ }
+ atomic.StoreUint32(&stopped, 1)
+ }()
+
+ if configureNetwork {
+ if _, err := containerCreateW.Write([]byte{1}); err != nil {
+ return 1, err
+ }
+ containerCreateW.Close()
+ logrus.Debug("waiting for parent start message")
+ b := make([]byte, 1)
+ if _, err := containerStartR.Read(b); err != nil {
+ return 1, fmt.Errorf("did not get container start message from parent: %w", err)
+ }
+ containerStartR.Close()
+ }
+
+ if copyPipes {
+ // We don't need the ends of the pipes that belong to the container.
+ stdin.Close()
+ if stdout != nil {
+ stdout.Close()
+ }
+ stderr.Close()
+ }
+
+ // Handle stdio for the container in the background.
+ stdio.Add(1)
+ go runCopyStdio(options.Logger, &stdio, copyPipes, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy, spec)
+
+ // Start the container.
+ logrus.Debugf("Running %q", start.Args)
+ err = start.Run()
+ if err != nil {
+ return 1, fmt.Errorf("error from %s starting container: %w", runtime, err)
+ }
+ defer func() {
+ if atomic.LoadUint32(&stopped) == 0 {
+ if err := kill("").Run(); err != nil {
+ options.Logger.Infof("error from %s stopping container: %v", runtime, err)
+ }
+ atomic.StoreUint32(&stopped, 1)
+ }
+ }()
+
+ // Wait for the container to exit.
+ interrupted := make(chan os.Signal, 100)
+ go func() {
+ for range interrupted {
+ if err := kill("SIGKILL").Run(); err != nil {
+ logrus.Errorf("%v sending SIGKILL", err)
+ }
+ }
+ }()
+ signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
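+ // Poll the runtime's "state" subcommand roughly every 100ms until the container stops.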
+ for {
+ now := time.Now()
+ var state specs.State
+ args = append(options.Args, "state", containerName)
+ stat := exec.Command(runtime, args...)
+ stat.Dir = bundlePath
+ stat.Stderr = os.Stderr
+ stateOutput, err := stat.Output()
+ if err != nil {
+ if atomic.LoadUint32(&stopped) != 0 {
+ // container exited
+ break
+ }
+ return 1, fmt.Errorf("error reading container state from %s (got output: %q): %w", runtime, string(stateOutput), err)
+ }
+ if err = json.Unmarshal(stateOutput, &state); err != nil {
+ return 1, fmt.Errorf("error parsing container state %q from %s: %w", string(stateOutput), runtime, err)
+ }
+ switch state.Status {
+ case "running":
+ case "stopped":
+ atomic.StoreUint32(&stopped, 1)
+ default:
+ return 1, fmt.Errorf("container status unexpectedly changed to %q", state.Status)
+ }
+ if atomic.LoadUint32(&stopped) != 0 {
+ break
+ }
+ select {
+ case <-finishedCopy:
+ atomic.StoreUint32(&stopped, 1)
+ case <-time.After(time.Until(now.Add(100 * time.Millisecond))):
+ continue
+ }
+ if atomic.LoadUint32(&stopped) != 0 {
+ break
+ }
+ }
+ signal.Stop(interrupted)
+ close(interrupted)
+
+ // Close the writing end of the stop-handling-stdio notification pipe.
+ unix.Close(finishCopy[1])
+ // Wait for the stdio copy goroutine to flush.
+ stdio.Wait()
+ // Wait until we finish reading the exit status.
+ reaping.Wait()
+
+ return wstatus, nil
+}
+
+func runCollectOutput(logger *logrus.Logger, fds, closeBeforeReadingFds []int) string { //nolint:interfacer
+ for _, fd := range closeBeforeReadingFds {
+ unix.Close(fd)
+ }
+ var b bytes.Buffer
+ buf := make([]byte, 8192)
+ for _, fd := range fds {
+ nread, err := unix.Read(fd, buf)
+ if err != nil {
+ if errno, isErrno := err.(syscall.Errno); isErrno {
+ switch errno {
+ default:
+ logger.Errorf("error reading from pipe %d: %v", fd, err)
+ case syscall.EINTR, syscall.EAGAIN:
+ }
+ } else {
+ logger.Errorf("unable to wait for data from pipe %d: %v", fd, err)
+ }
+ continue
+ }
+ for nread > 0 {
+ r := buf[:nread]
+ if nwritten, err := b.Write(r); err != nil || nwritten != len(r) {
+ if nwritten != len(r) {
+ logger.Errorf("error buffering data from pipe %d: %v", fd, err)
+ break
+ }
+ }
+ nread, err = unix.Read(fd, buf)
+ if err != nil {
+ if errno, isErrno := err.(syscall.Errno); isErrno {
+ switch errno {
+ default:
+ logger.Errorf("error reading from pipe %d: %v", fd, err)
+ case syscall.EINTR, syscall.EAGAIN:
+ }
+ } else {
+ logger.Errorf("unable to wait for data from pipe %d: %v", fd, err)
+ }
+ break
+ }
+ }
+ }
+ return b.String()
+}
+
+func setNonblock(logger *logrus.Logger, fd int, description string, nonblocking bool) (bool, error) { //nolint:interfacer
+ mask, err := unix.FcntlInt(uintptr(fd), unix.F_GETFL, 0)
+ if err != nil {
+ return false, err
+ }
+ blocked := mask&unix.O_NONBLOCK == 0
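+ // blocked records whether the descriptor was in blocking mode before the change, so callers can restore that state later.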
+
+ if err := unix.SetNonblock(fd, nonblocking); err != nil {
+ if nonblocking {
+ logger.Errorf("error setting %s to nonblocking: %v", description, err)
+ } else {
+ logger.Errorf("error setting descriptor %s blocking: %v", description, err)
+ }
+ }
+ return blocked, err
+}
+
+func runCopyStdio(logger *logrus.Logger, stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec) {
+ defer func() {
+ unix.Close(finishCopy[0])
+ if copyPipes {
+ unix.Close(stdioPipe[unix.Stdin][1])
+ unix.Close(stdioPipe[unix.Stdout][0])
+ unix.Close(stdioPipe[unix.Stderr][0])
+ }
+ stdio.Done()
+ finishedCopy <- struct{}{}
+ close(finishedCopy)
+ }()
+ // Map describing where data on an incoming descriptor should go.
+ relayMap := make(map[int]int)
+ // Map describing incoming and outgoing descriptors.
+ readDesc := make(map[int]string)
+ writeDesc := make(map[int]string)
+ // Buffers.
+ relayBuffer := make(map[int]*bytes.Buffer)
+ // Set up the terminal descriptor or pipes for polling.
+ if copyConsole {
+ // Accept a connection over our listening socket.
+ fd, err := runAcceptTerminal(logger, consoleListener, spec.Process.ConsoleSize)
+ if err != nil {
+ logger.Errorf("%v", err)
+ return
+ }
+ terminalFD := fd
+ // Input from our stdin, output from the terminal descriptor.
+ relayMap[unix.Stdin] = terminalFD
+ readDesc[unix.Stdin] = "stdin"
+ relayBuffer[terminalFD] = new(bytes.Buffer)
+ writeDesc[terminalFD] = "container terminal input"
+ relayMap[terminalFD] = unix.Stdout
+ readDesc[terminalFD] = "container terminal output"
+ relayBuffer[unix.Stdout] = new(bytes.Buffer)
+ writeDesc[unix.Stdout] = "output"
+ // Set our terminal's mode to raw, to pass handling of special
+ // terminal input to the terminal in the container.
+ if term.IsTerminal(unix.Stdin) {
+ if state, err := term.MakeRaw(unix.Stdin); err != nil {
+ logger.Warnf("error setting terminal state: %v", err)
+ } else {
+ defer func() {
+ if err = term.Restore(unix.Stdin, state); err != nil {
+ logger.Errorf("unable to restore terminal state: %v", err)
+ }
+ }()
+ }
+ }
+ }
+ if copyPipes {
+ // Input from our stdin, output from the stdout and stderr pipes.
+ relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1]
+ readDesc[unix.Stdin] = "stdin"
+ relayBuffer[stdioPipe[unix.Stdin][1]] = new(bytes.Buffer)
+ writeDesc[stdioPipe[unix.Stdin][1]] = "container stdin"
+ relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout
+ readDesc[stdioPipe[unix.Stdout][0]] = "container stdout"
+ relayBuffer[unix.Stdout] = new(bytes.Buffer)
+ writeDesc[unix.Stdout] = "stdout"
+ relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr
+ readDesc[stdioPipe[unix.Stderr][0]] = "container stderr"
+ relayBuffer[unix.Stderr] = new(bytes.Buffer)
+ writeDesc[unix.Stderr] = "stderr"
+ }
+ // Set our reading descriptors to non-blocking.
+ for rfd, wfd := range relayMap {
+ blocked, err := setNonblock(logger, rfd, readDesc[rfd], true)
+ if err != nil {
+ return
+ }
+ if blocked {
+ defer setNonblock(logger, rfd, readDesc[rfd], false) // nolint:errcheck
+ }
+ setNonblock(logger, wfd, writeDesc[wfd], false) // nolint:errcheck
+ }
+
+ if copyPipes {
+ setNonblock(logger, stdioPipe[unix.Stdin][1], writeDesc[stdioPipe[unix.Stdin][1]], true) // nolint:errcheck
+ }
+
+ runCopyStdioPassData(copyPipes, stdioPipe, finishCopy, relayMap, relayBuffer, readDesc, writeDesc)
+}
+
+func canRetry(err error) bool {
+ if errno, isErrno := err.(syscall.Errno); isErrno {
+ return errno == syscall.EINTR || errno == syscall.EAGAIN
+ }
+ return false
+}
+
+func runCopyStdioPassData(copyPipes bool, stdioPipe [][]int, finishCopy []int, relayMap map[int]int, relayBuffer map[int]*bytes.Buffer, readDesc map[int]string, writeDesc map[int]string) {
+ closeStdin := false
+
+ // Pass data back and forth.
+ pollTimeout := -1
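+ // A pollTimeout of -1 blocks indefinitely; it is reset to 100ms below whenever buffered data is still waiting to be written out.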
+ for len(relayMap) > 0 {
+ // Start building the list of descriptors to poll.
+ pollFds := make([]unix.PollFd, 0, len(relayMap)+1)
+ // Poll for a notification that we should stop handling stdio.
+ pollFds = append(pollFds, unix.PollFd{Fd: int32(finishCopy[0]), Events: unix.POLLIN | unix.POLLHUP})
+ // Poll on our reading descriptors.
+ for rfd := range relayMap {
+ pollFds = append(pollFds, unix.PollFd{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP})
+ }
+ buf := make([]byte, 8192)
+ // Wait for new data from any input descriptor, or a notification that we're done.
+ _, err := unix.Poll(pollFds, pollTimeout)
+ if !util.LogIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) {
+ return
+ }
+ removes := make(map[int]struct{})
+ for _, pollFd := range pollFds {
+ // If this descriptor's just been closed from the other end, mark it for
+ // removal from the set that we're checking for.
+ if pollFd.Revents&unix.POLLHUP == unix.POLLHUP {
+ removes[int(pollFd.Fd)] = struct{}{}
+ }
+ // If the descriptor was closed elsewhere, remove it from our list.
+ if pollFd.Revents&unix.POLLNVAL != 0 {
+ logrus.Debugf("error polling descriptor %s: closed?", readDesc[int(pollFd.Fd)])
+ removes[int(pollFd.Fd)] = struct{}{}
+ }
+ // If the POLLIN flag isn't set, then there's no data to be read from this descriptor.
+ if pollFd.Revents&unix.POLLIN == 0 {
+ continue
+ }
+ // Read whatever there is to be read.
+ readFD := int(pollFd.Fd)
+ writeFD, needToRelay := relayMap[readFD]
+ if needToRelay {
+ n, err := unix.Read(readFD, buf)
+ if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) {
+ return
+ }
+ // If it's zero-length on our stdin and we're
+ // using pipes, it's an EOF, so close the stdin
+ // pipe's writing end.
+ if n == 0 && !canRetry(err) && int(pollFd.Fd) == unix.Stdin {
+ removes[int(pollFd.Fd)] = struct{}{}
+ } else if n > 0 {
+ // Buffer the data in case we get blocked on where they need to go.
+ nwritten, err := relayBuffer[writeFD].Write(buf[:n])
+ if err != nil {
+ logrus.Debugf("buffer: %v", err)
+ continue
+ }
+ if nwritten != n {
+ logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", n, nwritten)
+ continue
+ }
+ // If this is the last of the data we'll be able to read from this
+ // descriptor, read all that there is to read.
+ for pollFd.Revents&unix.POLLHUP == unix.POLLHUP {
+ nr, err := unix.Read(readFD, buf)
+ util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err))
+ if nr <= 0 {
+ break
+ }
+ nwritten, err := relayBuffer[writeFD].Write(buf[:nr])
+ if err != nil {
+ logrus.Debugf("buffer: %v", err)
+ break
+ }
+ if nwritten != nr {
+ logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten)
+ break
+ }
+ }
+ }
+ }
+ }
+ // Try to drain the output buffers. Set the default timeout
+ // for the next poll() to 100ms if we still have data to write.
+ pollTimeout = -1
+ for writeFD := range relayBuffer {
+ if relayBuffer[writeFD].Len() > 0 {
+ n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes())
+ if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) {
+ return
+ }
+ if n > 0 {
+ relayBuffer[writeFD].Next(n)
+ }
+ if closeStdin && writeFD == stdioPipe[unix.Stdin][1] && stdioPipe[unix.Stdin][1] >= 0 && relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 {
+ logrus.Debugf("closing stdin")
+ unix.Close(stdioPipe[unix.Stdin][1])
+ stdioPipe[unix.Stdin][1] = -1
+ }
+ }
+ if relayBuffer[writeFD].Len() > 0 {
+ pollTimeout = 100
+ }
+ }
+ // Remove any descriptors which we don't need to poll any more from the poll descriptor list.
+ for remove := range removes {
+ if copyPipes && remove == unix.Stdin {
+ closeStdin = true
+ if relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 {
+ logrus.Debugf("closing stdin")
+ unix.Close(stdioPipe[unix.Stdin][1])
+ stdioPipe[unix.Stdin][1] = -1
+ }
+ }
+ delete(relayMap, remove)
+ }
+ // If the we-can-return pipe had anything for us, we're done.
+ for _, pollFd := range pollFds {
+ if int(pollFd.Fd) == finishCopy[0] && pollFd.Revents != 0 {
+ // The pipe is closed, indicating that we can stop now.
+ return
+ }
+ }
+ }
+}
+
+func runAcceptTerminal(logger *logrus.Logger, consoleListener *net.UnixListener, terminalSize *specs.Box) (int, error) {
+ defer consoleListener.Close()
+ c, err := consoleListener.AcceptUnix()
+ if err != nil {
+ return -1, fmt.Errorf("error accepting socket descriptor connection: %w", err)
+ }
+ defer c.Close()
+ // Expect a control message over our new connection.
+ b := make([]byte, 8192)
+ oob := make([]byte, 8192)
+ n, oobn, _, _, err := c.ReadMsgUnix(b, oob)
+ if err != nil {
+ return -1, fmt.Errorf("error reading socket descriptor: %w", err)
+ }
+ if n > 0 {
+ logrus.Debugf("socket descriptor is for %q", string(b[:n]))
+ }
+ if oobn > len(oob) {
+ return -1, fmt.Errorf("too much out-of-band data (%d bytes)", oobn)
+ }
+ // Parse the control message.
+ scm, err := unix.ParseSocketControlMessage(oob[:oobn])
+ if err != nil {
+ return -1, fmt.Errorf("error parsing out-of-band data as a socket control message: %w", err)
+ }
+ logrus.Debugf("control messages: %v", scm)
+ // Expect to get a descriptor.
+ terminalFD := -1
+ for i := range scm {
+ fds, err := unix.ParseUnixRights(&scm[i])
+ if err != nil {
+ return -1, fmt.Errorf("error parsing unix rights control message: %v: %w", &scm[i], err)
+ }
+ logrus.Debugf("fds: %v", fds)
+ if len(fds) == 0 {
+ continue
+ }
+ terminalFD = fds[0]
+ break
+ }
+ if terminalFD == -1 {
+ return -1, fmt.Errorf("unable to read terminal descriptor")
+ }
+ // Set the pseudoterminal's size to the configured size, or our own.
+ winsize := &unix.Winsize{}
+ if terminalSize != nil {
+ // Use configured sizes.
+ winsize.Row = uint16(terminalSize.Height)
+ winsize.Col = uint16(terminalSize.Width)
+ } else {
+ if term.IsTerminal(unix.Stdin) {
+ // Use the size of our terminal.
+ if winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ); err != nil {
+ logger.Warnf("error reading size of controlling terminal: %v", err)
+ winsize.Row = 0
+ winsize.Col = 0
+ }
+ }
+ }
+ if winsize.Row != 0 && winsize.Col != 0 {
+ if err = unix.IoctlSetWinsize(terminalFD, unix.TIOCSWINSZ, winsize); err != nil {
+ logger.Warnf("error setting size of container pseudoterminal: %v", err)
+ }
+ // FIXME - if we're connected to a terminal, we should
+ // be passing the updated terminal size down when we
+ // receive a SIGWINCH.
+ }
+ return terminalFD, nil
+}
+
+func runUsingRuntimeMain() {
+ var options runUsingRuntimeSubprocOptions
+ // Set logging.
+ if level := os.Getenv("LOGLEVEL"); level != "" {
+ if ll, err := strconv.Atoi(level); err == nil {
+ logrus.SetLevel(logrus.Level(ll))
+ }
+ }
+ // Unpack our configuration.
+ confPipe := os.NewFile(3, "confpipe")
+ if confPipe == nil {
+ fmt.Fprintf(os.Stderr, "error reading options pipe\n")
+ os.Exit(1)
+ }
+ defer confPipe.Close()
+ if err := json.NewDecoder(confPipe).Decode(&options); err != nil {
+ fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err)
+ os.Exit(1)
+ }
+ // Set ourselves up to read the container's exit status. We're doing this in a child process
+ // so that we won't mess with the setting in a caller of the library.
+ if err := setChildProcess(); err != nil {
+ os.Exit(1)
+ }
+ ospec := options.Spec
+ if ospec == nil {
+ fmt.Fprintf(os.Stderr, "options spec not specified\n")
+ os.Exit(1)
+ }
+
+ // open the pipes used to communicate with the parent process
+ var containerCreateW *os.File
+ var containerStartR *os.File
+ if options.ConfigureNetwork {
+ containerCreateW = os.NewFile(4, "containercreatepipe")
+ if containerCreateW == nil {
+ fmt.Fprintf(os.Stderr, "could not open fd 4\n")
+ os.Exit(1)
+ }
+ containerStartR = os.NewFile(5, "containerstartpipe")
+ if containerStartR == nil {
+ fmt.Fprintf(os.Stderr, "could not open fd 5\n")
+ os.Exit(1)
+ }
+ }
+
+ // Run the container, start to finish.
+ status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName, containerCreateW, containerStartR)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error running container: %v\n", err)
+ os.Exit(1)
+ }
+ // Pass the container's exit status back to the caller by exiting with the same status.
+ if status.Exited() {
+ os.Exit(status.ExitStatus())
+ } else if status.Signaled() {
+ fmt.Fprintf(os.Stderr, "container exited on %s\n", status.Signal())
+ os.Exit(1)
+ }
+ os.Exit(1)
+}
+
+func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks,
+ moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName, buildContainerName, hostsFile string) (err error) {
+ var confwg sync.WaitGroup
+ config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
+ Options: options,
+ Spec: spec,
+ RootPath: rootPath,
+ BundlePath: bundlePath,
+ ConfigureNetwork: configureNetwork,
+ MoreCreateArgs: moreCreateArgs,
+ ContainerName: containerName,
+ Isolation: isolation,
+ })
+ if conferr != nil {
+ return fmt.Errorf("error encoding configuration for %q: %w", runUsingRuntimeCommand, conferr)
+ }
+ cmd := reexec.Command(runUsingRuntimeCommand)
+ setPdeathsig(cmd)
+ cmd.Dir = bundlePath
+ cmd.Stdin = options.Stdin
+ if cmd.Stdin == nil {
+ cmd.Stdin = os.Stdin
+ }
+ cmd.Stdout = options.Stdout
+ if cmd.Stdout == nil {
+ cmd.Stdout = os.Stdout
+ }
+ cmd.Stderr = options.Stderr
+ if cmd.Stderr == nil {
+ cmd.Stderr = os.Stderr
+ }
+ cmd.Env = util.MergeEnv(os.Environ(), []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())})
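+ // LOGLEVEL is read back by runUsingRuntimeMain in the re-exec'd child to restore the logging level.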
+ preader, pwriter, err := os.Pipe()
+ if err != nil {
+ return fmt.Errorf("error creating configuration pipe: %w", err)
+ }
+ confwg.Add(1)
+ go func() {
+ _, conferr = io.Copy(pwriter, bytes.NewReader(config))
+ if conferr != nil {
+ conferr = fmt.Errorf("error while copying configuration down pipe to child process: %w", conferr)
+ }
+ confwg.Done()
+ }()
+
+ // create network configuration pipes
+ var containerCreateR, containerCreateW fileCloser
+ var containerStartR, containerStartW fileCloser
+ if configureNetwork {
+ containerCreateR.file, containerCreateW.file, err = os.Pipe()
+ if err != nil {
+ return fmt.Errorf("error creating container create pipe: %w", err)
+ }
+ defer containerCreateR.Close()
+ defer containerCreateW.Close()
+
+ containerStartR.file, containerStartW.file, err = os.Pipe()
+ if err != nil {
+ return fmt.Errorf("error creating container start pipe: %w", err)
+ }
+ defer containerStartR.Close()
+ defer containerStartW.Close()
+ cmd.ExtraFiles = []*os.File{containerCreateW.file, containerStartR.file}
+ }
+
+ cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
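+ // Each ExtraFiles entry i becomes descriptor 3+i in the child: preader is fd 3 (the config pipe), and, when networking is configured, the create and start pipes are fds 4 and 5, matching the descriptors opened in runUsingRuntimeMain.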
+ defer preader.Close()
+ defer pwriter.Close()
+ if err := cmd.Start(); err != nil {
+ return fmt.Errorf("error while starting runtime: %w", err)
+ }
+
+ interrupted := make(chan os.Signal, 100)
+ go func() {
+ for receivedSignal := range interrupted {
+ if err := cmd.Process.Signal(receivedSignal); err != nil {
+ logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal)
+ }
+ }
+ }()
+ signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
+
+ if configureNetwork {
+ // we already passed the fd to the child, now close the writer so we do not hang if the child closes it
+ containerCreateW.Close()
+ if err := waitForSync(containerCreateR.file); err != nil {
+ // we do not want to return here since we want to capture the exit code from the child via cmd.Wait()
+ // close the pipes here so that the child will not hang forever
+ containerCreateR.Close()
+ containerStartW.Close()
+ logrus.Errorf("did not get container create message from subprocess: %v", err)
+ } else {
+ pidFile := filepath.Join(bundlePath, "pid")
+ pidValue, err := ioutil.ReadFile(pidFile)
+ if err != nil {
+ return err
+ }
+ pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue)))
+ if err != nil {
+ return fmt.Errorf("error parsing pid %s as a number: %w", string(pidValue), err)
+ }
+
+ teardown, netstatus, err := b.runConfigureNetwork(pid, isolation, options, configureNetworks, containerName)
+ if teardown != nil {
+ defer teardown()
+ }
+ if err != nil {
+ return err
+ }
+
+ // only add hosts if we manage the hosts file
+ if hostsFile != "" {
+ var entries etchosts.HostEntries
+ if netstatus != nil {
+ entries = etchosts.GetNetworkHostEntries(netstatus, spec.Hostname, buildContainerName)
+ } else {
+ // we have slirp4netns, default to the slirp4netns IP since this is not configurable in buildah
+ entries = etchosts.HostEntries{{IP: "10.0.2.100", Names: []string{spec.Hostname, buildContainerName}}}
+ }
+ // make sure to sync this with (b *Builder) generateHosts()
+ err = etchosts.Add(hostsFile, entries)
+ if err != nil {
+ return err
+ }
+ }
+
+ logrus.Debug("network namespace successfully set up, sending start message to child")
+ _, err = containerStartW.file.Write([]byte{1})
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ if err := cmd.Wait(); err != nil {
+ return fmt.Errorf("error while running runtime: %w", err)
+ }
+ confwg.Wait()
+ signal.Stop(interrupted)
+ close(interrupted)
+ if err == nil {
+ return conferr
+ }
+ if conferr != nil {
+ logrus.Debugf("%v", conferr)
+ }
+ return err
+}
+
+type runUsingRuntimeSubprocOptions struct {
+ Options RunOptions
+ Spec *specs.Spec
+ RootPath string
+ BundlePath string
+ ConfigureNetwork bool
+ MoreCreateArgs []string
+ ContainerName string
+ Isolation define.Isolation
+}
+
+func init() {
+ reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain)
+}
+
+func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, runFileMounts []string, runMountInfo runMountInfo) (*runMountArtifacts, error) {
+ // Start building a new list of mounts.
+ var mounts []specs.Mount
+ haveMount := func(destination string) bool {
+ for _, mount := range mounts {
+ if mount.Destination == destination {
+ // Already have something to mount there.
+ return true
+ }
+ }
+ return false
+ }
+
+ specMounts, err := setupSpecialMountSpecChanges(spec, b.CommonBuildOpts.ShmSize)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the list of files we need to bind into the container.
+ bindFileMounts := runSetupBoundFiles(bundlePath, bindFiles)
+
+ // After this point we need to know the per-container persistent storage directory.
+ cdir, err := b.store.ContainerDirectory(b.ContainerID)
+ if err != nil {
+ return nil, fmt.Errorf("error determining work directory for container %q: %w", b.ContainerID, err)
+ }
+
+ // Figure out which UID and GID to tell the subscriptions package to use
+ // for files that it creates.
+ rootUID, rootGID, err := util.GetHostRootIDs(spec)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get host UID and GID of the container process.
+ var uidMap = []specs.LinuxIDMapping{}
+ var gidMap = []specs.LinuxIDMapping{}
+ if spec.Linux != nil {
+ uidMap = spec.Linux.UIDMappings
+ gidMap = spec.Linux.GIDMappings
+ }
+ processUID, processGID, err := util.GetHostIDs(uidMap, gidMap, spec.Process.User.UID, spec.Process.User.GID)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the list of subscriptions mounts.
+ subscriptionMounts := subscriptions.MountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false)
+
+ idMaps := IDMaps{
+ uidmap: uidMap,
+ gidmap: gidMap,
+ rootUID: int(rootUID),
+ rootGID: int(rootGID),
+ processUID: int(processUID),
+ processGID: int(processGID),
+ }
+ // Get the list of mounts that are just for this Run() call.
+ runMounts, mountArtifacts, err := b.runSetupRunMounts(runFileMounts, runMountInfo, idMaps)
+ if err != nil {
+ return nil, err
+ }
+ // Add temporary copies of the contents of volume locations at the
+ // volume locations, unless we already have something there.
+ builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, int(rootUID), int(rootGID))
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the list of explicitly-specified volume mounts.
+ var mountLabel = ""
+ if spec.Linux != nil {
+ mountLabel = spec.Linux.MountLabel
+ }
+ volumes, err := b.runSetupVolumeMounts(mountLabel, volumeMounts, optionMounts, idMaps)
+ if err != nil {
+ return nil, err
+ }
+
+ // Prepare the list of mount destinations which can be cleaned up safely:
+ // we can clean bindFiles, subscriptionMounts, and specMounts.
+ // Everything other than these might contain the user's content.
+ mountArtifacts.RunMountTargets = append(append(append(mountArtifacts.RunMountTargets, cleanableDestinationListFromMounts(bindFileMounts)...), cleanableDestinationListFromMounts(subscriptionMounts)...), cleanableDestinationListFromMounts(specMounts)...)
+
+ allMounts := util.SortMounts(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...))
+ // Add them all, in the preferred order, except where they conflict with something that was previously added.
+ for _, mount := range allMounts {
+ if haveMount(mount.Destination) {
+ // Already mounting something there, no need to bother with this one.
+ continue
+ }
+ // Add the mount.
+ mounts = append(mounts, mount)
+ }
+
+ // Set the list in the spec.
+ spec.Mounts = mounts
+ return mountArtifacts, nil
+}
+
+func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
+ var mounts []specs.Mount
+ hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID}
+ // Add temporary copies of the contents of volume locations at the
+ // volume locations, unless we already have something there.
+ for _, volume := range builtinVolumes {
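+ // Each built-in volume is backed by a per-container directory whose name is the digest of the volume path.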
+ volumePath := filepath.Join(containerDir, "buildah-volumes", digest.Canonical.FromString(volume).Hex())
+ initializeVolume := false
+ // If we need to, create the directory that we'll use to hold
+ // the volume contents. If we do need to create it, then we'll
+ // need to populate it, too, so make a note of that.
+ if _, err := os.Stat(volumePath); err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+ logrus.Debugf("setting up built-in volume path at %q for %q", volumePath, volume)
+ if err = os.MkdirAll(volumePath, 0755); err != nil {
+ return nil, err
+ }
+ if err = label.Relabel(volumePath, mountLabel, false); err != nil {
+ return nil, err
+ }
+ initializeVolume = true
+ }
+ // Make sure the volume exists in the rootfs and read its attributes.
+ createDirPerms := os.FileMode(0755)
+ err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, volume), copier.MkdirOptions{
+ ChownNew: &hostOwner,
+ ChmodNew: &createDirPerms,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("ensuring volume path %q: %w", filepath.Join(mountPoint, volume), err)
+ }
+ srcPath, err := copier.Eval(mountPoint, filepath.Join(mountPoint, volume), copier.EvalOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("evaluating path %q: %w", srcPath, err)
+ }
+ stat, err := os.Stat(srcPath)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ // If we need to populate the mounted volume's contents with
+ // content from the rootfs, set it up now.
+ if initializeVolume {
+ if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil {
+ return nil, err
+ }
+ if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil {
+ return nil, err
+ }
+ logrus.Debugf("populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath)
+ if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("error populating directory %q for volume %q using contents of %q: %w", volumePath, volume, srcPath, err)
+ }
+ }
+ // Add the bind mount.
+ mounts = append(mounts, specs.Mount{
+ Source: volumePath,
+ Destination: volume,
+ Type: define.TypeBind,
+ Options: define.BindOptions,
+ })
+ }
+ return mounts, nil
+}
+
+// Destinations which can be cleaned up after every RUN
+func cleanableDestinationListFromMounts(mounts []spec.Mount) []string {
+ mountDest := []string{}
+ for _, mount := range mounts {
+ // Add all destinations to mountArtifacts so that they can be cleaned up later
+ if mount.Destination != "" {
+ cleanPath := true
+ for _, prefix := range nonCleanablePrefixes {
+ if strings.HasPrefix(mount.Destination, prefix) {
+ cleanPath = false
+ break
+ }
+ }
+ if cleanPath {
+ mountDest = append(mountDest, mount.Destination)
+ }
+ }
+ }
+ return mountDest
+}
+
+// runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs
+func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMaps IDMaps) ([]spec.Mount, *runMountArtifacts, error) {
+ mountTargets := make([]string, 0, 10)
+ tmpFiles := make([]string, 0, len(mounts))
+ mountImages := make([]string, 0, 10)
+ finalMounts := make([]specs.Mount, 0, len(mounts))
+ agents := make([]*sshagent.AgentServer, 0, len(mounts))
+ sshCount := 0
+ defaultSSHSock := ""
+ tokens := []string{}
+ lockedTargets := []string{}
+ for _, mount := range mounts {
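+ // Each mount is of the form "type=<kind>[,<key>=<value>,...]", e.g.
+ // "type=secret,id=mysecret" or "type=cache,target=/var/cache"; the
+ // type token selects the handler below.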
+ arr := strings.SplitN(mount, ",", 2)
+
+ kv := strings.Split(arr[0], "=")
+ if len(kv) != 2 || kv[0] != "type" {
+ return nil, nil, errors.New("invalid mount type")
+ }
+ if len(arr) == 2 {
+ tokens = strings.Split(arr[1], ",")
+ }
+
+ switch kv[1] {
+ case "secret":
+ mount, envFile, err := b.getSecretMount(tokens, sources.Secrets, idMaps)
+ if err != nil {
+ return nil, nil, err
+ }
+ if mount != nil {
+ finalMounts = append(finalMounts, *mount)
+ mountTargets = append(mountTargets, mount.Destination)
+ if envFile != "" {
+ tmpFiles = append(tmpFiles, envFile)
+ }
+ }
+ case "ssh":
+ mount, agent, err := b.getSSHMount(tokens, sshCount, sources.SSHSources, idMaps)
+ if err != nil {
+ return nil, nil, err
+ }
+ if mount != nil {
+ finalMounts = append(finalMounts, *mount)
+ mountTargets = append(mountTargets, mount.Destination)
+ agents = append(agents, agent)
+ if sshCount == 0 {
+ defaultSSHSock = mount.Destination
+ }
+ // Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i}
+ sshCount++
+ }
+ case "bind":
+ mount, image, err := b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps)
+ if err != nil {
+ return nil, nil, err
+ }
+ finalMounts = append(finalMounts, *mount)
+ mountTargets = append(mountTargets, mount.Destination)
+ // only perform cleanup if an image was mounted; ignore everything else
+ if image != "" {
+ mountImages = append(mountImages, image)
+ }
+ case "tmpfs":
+ mount, err := b.getTmpfsMount(tokens, idMaps)
+ if err != nil {
+ return nil, nil, err
+ }
+ finalMounts = append(finalMounts, *mount)
+ mountTargets = append(mountTargets, mount.Destination)
+ case "cache":
+ mount, lockedPaths, err := b.getCacheMount(tokens, sources.StageMountPoints, idMaps)
+ if err != nil {
+ return nil, nil, err
+ }
+ finalMounts = append(finalMounts, *mount)
+ mountTargets = append(mountTargets, mount.Destination)
+ lockedTargets = lockedPaths
+ default:
+ return nil, nil, fmt.Errorf("invalid mount type %q", kv[1])
+ }
+ }
+ artifacts := &runMountArtifacts{
+ RunMountTargets: mountTargets,
+ TmpFiles: tmpFiles,
+ Agents: agents,
+ MountedImages: mountImages,
+ SSHAuthSock: defaultSSHSock,
+ LockedTargets: lockedTargets,
+ }
+ return finalMounts, artifacts, nil
+}
+
+func (b *Builder) getBindMount(tokens []string, context *imageTypes.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, string, error) {
+ if contextDir == "" {
+ return nil, "", errors.New("context directory for the current run invocation is not configured")
+ }
+ var optionMounts []specs.Mount
+ mount, image, err := internalParse.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints)
+ if err != nil {
+ return nil, image, err
+ }
+ optionMounts = append(optionMounts, mount)
+ volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
+ if err != nil {
+ return nil, image, err
+ }
+ return &volumes[0], image, nil
+}
+
+func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*spec.Mount, error) {
+ var optionMounts []specs.Mount
+ mount, err := internalParse.GetTmpfsMount(tokens)
+ if err != nil {
+ return nil, err
+ }
+ optionMounts = append(optionMounts, mount)
+ volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
+ if err != nil {
+ return nil, err
+ }
+ return &volumes[0], nil
+}
+
+func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secret, idMaps IDMaps) (*spec.Mount, string, error) {
+ errInvalidSyntax := errors.New("secret should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint]")
+ if len(tokens) == 0 {
+ return nil, "", errInvalidSyntax
+ }
+ var err error
+ var id, target string
+ var required bool
+ var uid, gid uint32
+ var mode uint32 = 0400
+ for _, val := range tokens {
+ kv := strings.SplitN(val, "=", 2)
+ switch kv[0] {
+ case "id":
+ id = kv[1]
+ case "target", "dst", "destination":
+ target = kv[1]
+ case "required":
+ required, err = strconv.ParseBool(kv[1])
+ if err != nil {
+ return nil, "", errInvalidSyntax
+ }
+ case "mode":
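+ // mode is parsed as octal (base 8), matching conventional file permission notation.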
+ mode64, err := strconv.ParseUint(kv[1], 8, 32)
+ if err != nil {
+ return nil, "", errInvalidSyntax
+ }
+ mode = uint32(mode64)
+ case "uid":
+ uid64, err := strconv.ParseUint(kv[1], 10, 32)
+ if err != nil {
+ return nil, "", errInvalidSyntax
+ }
+ uid = uint32(uid64)
+ case "gid":
+ gid64, err := strconv.ParseUint(kv[1], 10, 32)
+ if err != nil {
+ return nil, "", errInvalidSyntax
+ }
+ gid = uint32(gid64)
+ default:
+ return nil, "", errInvalidSyntax
+ }
+ }
+
+ if id == "" {
+ return nil, "", errInvalidSyntax
+ }
+ // Default location for secrets is /run/secrets/id
+ if target == "" {
+ target = "/run/secrets/" + id
+ }
+
+ secr, ok := secrets[id]
+ if !ok {
+ if required {
+ return nil, "", fmt.Errorf("secret required but no secret with id %s found", id)
+ }
+ return nil, "", nil
+ }
+ var data []byte
+ var envFile string
+ var ctrFileOnHost string
+
+ switch secr.SourceType {
+ case "env":
+ data = []byte(os.Getenv(secr.Source))
+ tmpFile, err := ioutil.TempFile(define.TempDir, "buildah*")
+ if err != nil {
+ return nil, "", err
+ }
+ envFile = tmpFile.Name()
+ ctrFileOnHost = tmpFile.Name()
+ case "file":
+ containerWorkingDir, err := b.store.ContainerDirectory(b.ContainerID)
+ if err != nil {
+ return nil, "", err
+ }
+ data, err = ioutil.ReadFile(secr.Source)
+ if err != nil {
+ return nil, "", err
+ }
+ ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id)
+ default:
+ return nil, "", errors.New("invalid source secret type")
+ }
+
+ // Copy secrets to container working dir (or tmp dir if it's an env), since we need to chmod,
+ // chown and relabel it for the container user and we don't want to mess with the original file
+ if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil {
+ return nil, "", err
+ }
+ if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil {
+ return nil, "", err
+ }
+
+ if err := label.Relabel(ctrFileOnHost, b.MountLabel, false); err != nil {
+ return nil, "", err
+ }
+ hostUID, hostGID, err := util.GetHostIDs(idMaps.uidmap, idMaps.gidmap, uid, gid)
+ if err != nil {
+ return nil, "", err
+ }
+ if err := os.Lchown(ctrFileOnHost, int(hostUID), int(hostGID)); err != nil {
+ return nil, "", err
+ }
+ if err := os.Chmod(ctrFileOnHost, os.FileMode(mode)); err != nil {
+ return nil, "", err
+ }
+ newMount := specs.Mount{
+ Destination: target,
+ Type: define.TypeBind,
+ Source: ctrFileOnHost,
+ Options: append(define.BindOptions, "rprivate", "ro"),
+ }
+ return &newMount, envFile, nil
+}
+
+// getSSHMount parses the --mount type=ssh flag in the Containerfile, checks if there's an ssh source provided, and creates and starts an ssh-agent to be forwarded into the container
+func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, idMaps IDMaps) (*spec.Mount, *sshagent.AgentServer, error) {
+ errInvalidSyntax := errors.New("ssh should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint]")
+
+ var err error
+ var id, target string
+ var required bool
+ var uid, gid uint32
+ var mode uint32 = 0400 // octal, matching the secret-mount default
+ for _, val := range tokens {
+ kv := strings.SplitN(val, "=", 2)
+ if len(kv) < 2 {
+ return nil, nil, errInvalidSyntax
+ }
+ switch kv[0] {
+ case "id":
+ id = kv[1]
+ case "target", "dst", "destination":
+ target = kv[1]
+ case "required":
+ required, err = strconv.ParseBool(kv[1])
+ if err != nil {
+ return nil, nil, errInvalidSyntax
+ }
+ case "mode":
+ mode64, err := strconv.ParseUint(kv[1], 8, 32)
+ if err != nil {
+ return nil, nil, errInvalidSyntax
+ }
+ mode = uint32(mode64)
+ case "uid":
+ uid64, err := strconv.ParseUint(kv[1], 10, 32)
+ if err != nil {
+ return nil, nil, errInvalidSyntax
+ }
+ uid = uint32(uid64)
+ case "gid":
+ gid64, err := strconv.ParseUint(kv[1], 10, 32)
+ if err != nil {
+ return nil, nil, errInvalidSyntax
+ }
+ gid = uint32(gid64)
+ default:
+ return nil, nil, errInvalidSyntax
+ }
+ }
+
+ if id == "" {
+ id = "default"
+ }
+ // Default location for the ssh agent socket is /run/buildkit/ssh_agent.{i}
+ if target == "" {
+ target = fmt.Sprintf("/run/buildkit/ssh_agent.%d", count)
+ }
+
+ sshsource, ok := sshsources[id]
+ if !ok {
+ if required {
+ return nil, nil, fmt.Errorf("ssh required but no ssh with id %s found", id)
+ }
+ return nil, nil, nil
+ }
+ // Create new agent from keys or socket
+ fwdAgent, err := sshagent.NewAgentServer(sshsource)
+ if err != nil {
+ return nil, nil, err
+ }
+ // Start ssh server, and get the host sock we're mounting in the container
+ hostSock, err := fwdAgent.Serve(b.ProcessLabel)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := label.Relabel(filepath.Dir(hostSock), b.MountLabel, false); err != nil {
+ if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+ b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+ }
+ return nil, nil, err
+ }
+ if err := label.Relabel(hostSock, b.MountLabel, false); err != nil {
+ if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+ b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+ }
+ return nil, nil, err
+ }
+ hostUID, hostGID, err := util.GetHostIDs(idMaps.uidmap, idMaps.gidmap, uid, gid)
+ if err != nil {
+ if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+ b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+ }
+ return nil, nil, err
+ }
+ if err := os.Lchown(hostSock, int(hostUID), int(hostGID)); err != nil {
+ if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+ b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+ }
+ return nil, nil, err
+ }
+ if err := os.Chmod(hostSock, os.FileMode(mode)); err != nil {
+ if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+ b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+ }
+ return nil, nil, err
+ }
+ newMount := specs.Mount{
+ Destination: target,
+ Type: define.TypeBind,
+ Source: hostSock,
+ Options: append(define.BindOptions, "rprivate", "ro"),
+ }
+ return &newMount, fwdAgent, nil
+}
+
+func (b *Builder) cleanupTempVolumes() {
+ for tempVolume, val := range b.TempVolumes {
+ if val {
+ if err := overlay.RemoveTemp(tempVolume); err != nil {
+ b.Logger.Error(err.Error())
+ }
+ b.TempVolumes[tempVolume] = false
+ }
+ }
+}
+
+// cleanupRunMounts cleans up run mounts so they only appear in this run.
+func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint string, artifacts *runMountArtifacts) error {
+ for _, agent := range artifacts.Agents {
+ err := agent.Shutdown()
+ if err != nil {
+ return err
+ }
+ }
+
+ // clean up any mounted images for this run
+ for _, image := range artifacts.MountedImages {
+ if image != "" {
+ // if we get here, some image was mounted for this run
+ i, err := internalUtil.LookupImage(context, b.store, image)
+ if err == nil {
+ // silently try to unmount and do nothing
+ // if the image is being used by something else
+ _ = i.Unmount(false)
+ // move on to the next image instead of falling through and returning early
+ continue
+ }
+ if errors.Is(err, storageTypes.ErrImageUnknown) {
+ // ignore ErrImageUnknown: the image is already unmounted, so there is nothing to do
+ continue
+ }
+ return err
+ }
+ }
+
+ opts := copier.RemoveOptions{
+ All: true,
+ }
+ for _, path := range artifacts.RunMountTargets {
+ err := copier.Remove(mountpoint, path, opts)
+ if err != nil {
+ return err
+ }
+ }
+ var prevErr error
+ for _, path := range artifacts.TmpFiles {
+ err := os.Remove(path)
+ if err != nil && !os.IsNotExist(err) {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ }
+ }
+ // unlock if any locked files from this RUN statement
+ for _, path := range artifacts.LockedTargets {
+ _, err := os.Stat(path)
+ if err != nil {
+ // The lockfile was not found; this might be a problem,
+ // since LockedTargets must contain the list of all locked files.
+ // Don't break here, since we still need to unlock the other files,
+ // but log it so the user can take a look.
+ logrus.Warnf("Lockfile %q was expected here, stat failed with %v", path, err)
+ continue
+ }
+ lockfile, err := lockfile.GetLockfile(path)
+ if err != nil {
+ // unable to get the lockfile;
+ // log the error and continue
+ // unlocking the other files
+ logrus.Warn(err)
+ continue
+ }
+ if lockfile.Locked() {
+ lockfile.Unlock()
+ } else {
+ logrus.Warnf("Lockfile %q was expected to be locked, this is unexpected", path)
+ continue
+ }
+ }
+ return prevErr
+}
diff --git a/vendor/github.com/containers/buildah/run_freebsd.go b/vendor/github.com/containers/buildah/run_freebsd.go
new file mode 100644
index 000000000..c9384d2d2
--- /dev/null
+++ b/vendor/github.com/containers/buildah/run_freebsd.go
@@ -0,0 +1,548 @@
+//go:build freebsd
+// +build freebsd
+
+package buildah
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "github.com/containers/buildah/bind"
+ "github.com/containers/buildah/chroot"
+ "github.com/containers/buildah/copier"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/internal"
+ "github.com/containers/buildah/pkg/jail"
+ "github.com/containers/buildah/util"
+ "github.com/containers/common/libnetwork/resolvconf"
+ nettypes "github.com/containers/common/libnetwork/types"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/stringid"
+ "github.com/docker/go-units"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ P_PID = 0
+ P_PGID = 2
+ PROC_REAP_ACQUIRE = 2
+ PROC_REAP_RELEASE = 3
+)
+
+var (
+ // We don't want to remove destinations under /etc or /dev, as the
+ // rootfs already contains these files and unionfs will create
+ // `whiteout` (`.wh`) files on removal of overlapping files from
+ // these directories. Everything other than these will be
+ // cleaned up.
+ nonCleanablePrefixes = []string{
+ "/etc", "/dev",
+ }
+)
+
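+// procctl is a thin wrapper over the FreeBSD procctl(2) system call;
+// PROC_REAP_ACQUIRE (used by setChildProcess below) makes the calling
+// process a reaper, so it collects the exit status of orphaned descendants.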
+func procctl(idtype int, id int, cmd int, arg *byte) error {
+ _, _, e1 := unix.Syscall6(
+ unix.SYS_PROCCTL, uintptr(idtype), uintptr(id),
+ uintptr(cmd), uintptr(unsafe.Pointer(arg)), 0, 0)
+ if e1 != 0 {
+ return unix.Errno(e1)
+ }
+ return nil
+}
+
+func setChildProcess() error {
+ if err := procctl(P_PID, unix.Getpid(), PROC_REAP_ACQUIRE, nil); err != nil {
+ fmt.Fprintf(os.Stderr, "procctl(PROC_REAP_ACQUIRE): %v\n", err)
+ return err
+ }
+ return nil
+}
+
+func (b *Builder) Run(command []string, options RunOptions) error {
+ p, err := ioutil.TempDir("", Package)
+ if err != nil {
+ return err
+ }
+ // On some hosts, such as Atomic Host (AH), /tmp is a symlink and
+ // we need an absolute path.
+ path, err := filepath.EvalSymlinks(p)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("using %q to hold bundle data", path)
+ defer func() {
+ if err2 := os.RemoveAll(path); err2 != nil {
+ logrus.Errorf("error removing %q: %v", path, err2)
+ }
+ }()
+
+ gp, err := generate.New("freebsd")
+ if err != nil {
+ return fmt.Errorf("error generating new 'freebsd' runtime spec: %w", err)
+ }
+ g := &gp
+
+ isolation := options.Isolation
+ if isolation == IsolationDefault {
+ isolation = b.Isolation
+ if isolation == IsolationDefault {
+ isolation = IsolationOCI
+ }
+ }
+ if err := checkAndOverrideIsolationOptions(isolation, &options); err != nil {
+ return err
+ }
+
+ // hardwire the environment to match docker build to avoid subtle and hard-to-debug differences due to containers.conf
+ b.configureEnvironment(g, options, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"})
+
+ if b.CommonBuildOpts == nil {
+ return fmt.Errorf("invalid format on container you must recreate the container")
+ }
+
+ if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil {
+ return err
+ }
+
+ if options.WorkingDir != "" {
+ g.SetProcessCwd(options.WorkingDir)
+ } else if b.WorkDir() != "" {
+ g.SetProcessCwd(b.WorkDir())
+ }
+ mountPoint, err := b.Mount(b.MountLabel)
+ if err != nil {
+ return fmt.Errorf("error mounting container %q: %w", b.ContainerID, err)
+ }
+ defer func() {
+ if err := b.Unmount(); err != nil {
+ logrus.Errorf("error unmounting container: %v", err)
+ }
+ }()
+ g.SetRootPath(mountPoint)
+ if len(command) > 0 {
+ command = runLookupPath(g, command)
+ g.SetProcessArgs(command)
+ } else {
+ g.SetProcessArgs(nil)
+ }
+
+ setupTerminal(g, options.Terminal, options.TerminalSize)
+
+ configureNetwork, configureNetworks, err := b.configureNamespaces(g, &options)
+ if err != nil {
+ return err
+ }
+
+ containerName := Package + "-" + filepath.Base(path)
+ if configureNetwork {
+ g.AddAnnotation("org.freebsd.parentJail", containerName+"-vnet")
+ }
+
+ homeDir, err := b.configureUIDGID(g, mountPoint, options)
+ if err != nil {
+ return err
+ }
+
+ // Now grab the spec from the generator. Set the generator to nil so that future contributors
+ // will quickly be able to tell that they're supposed to be modifying the spec directly from here.
+ spec := g.Config
+ g = nil
+
+ // Set the seccomp configuration using the specified profile name. Some syscalls are
+ // allowed if certain capabilities are to be granted (example: CAP_SYS_CHROOT and chroot),
+ // so we sorted out the capabilities lists first.
+ if err = setupSeccomp(spec, b.CommonBuildOpts.SeccompProfilePath); err != nil {
+ return err
+ }
+
+ uid, gid := spec.Process.User.UID, spec.Process.User.GID
+ idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}
+
+ mode := os.FileMode(0755)
+ coptions := copier.MkdirOptions{
+ ChownNew: idPair,
+ ChmodNew: &mode,
+ }
+ if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil {
+ return err
+ }
+
+ bindFiles := make(map[string]string)
+ volumes := b.Volumes()
+
+ // Figure out who owns files that will appear to be owned by UID/GID 0 in the container.
+ rootUID, rootGID, err := util.GetHostRootIDs(spec)
+ if err != nil {
+ return err
+ }
+ rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}
+
+ hostFile := ""
+ if !options.NoHosts && !contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled {
+ hostFile, err = b.generateHosts(path, rootIDPair, mountPoint)
+ if err != nil {
+ return err
+ }
+ bindFiles[config.DefaultHostsFile] = hostFile
+ }
+
+ if !contains(volumes, resolvconf.DefaultResolvConf) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") {
+ resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, nil)
+ if err != nil {
+ return err
+ }
+ bindFiles[resolvconf.DefaultResolvConf] = resolvFile
+ }
+
+ runMountInfo := runMountInfo{
+ ContextDir: options.ContextDir,
+ Secrets: options.Secrets,
+ SSHSources: options.SSHSources,
+ StageMountPoints: options.StageMountPoints,
+ SystemContext: options.SystemContext,
+ }
+
+ runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
+ if err != nil {
+ return fmt.Errorf("error resolving mountpoints for container %q: %w", b.ContainerID, err)
+ }
+ if runArtifacts.SSHAuthSock != "" {
+ sshenv := "SSH_AUTH_SOCK=" + runArtifacts.SSHAuthSock
+ spec.Process.Env = append(spec.Process.Env, sshenv)
+ }
+
+ // The following run was called from `buildah run`,
+ // and some images were mounted for this run;
+ // add them to the cleanup artifacts.
+ if len(options.ExternalImageMounts) > 0 {
+ runArtifacts.MountedImages = append(runArtifacts.MountedImages, options.ExternalImageMounts...)
+ }
+
+ defer func() {
+ if err := b.cleanupRunMounts(options.SystemContext, mountPoint, runArtifacts); err != nil {
+ options.Logger.Errorf("unable to cleanup run mounts %v", err)
+ }
+ }()
+
+ defer b.cleanupTempVolumes()
+
+ // If we are creating a network, make the vnet here so that we
+ // can execute the OCI runtime inside it.
+ if configureNetwork {
+ mynetns := containerName + "-vnet"
+
+ jconf := jail.NewConfig()
+ jconf.Set("name", mynetns)
+ jconf.Set("vnet", jail.NEW)
+ jconf.Set("children.max", 1)
+ jconf.Set("persist", true)
+ jconf.Set("enforce_statfs", 0)
+ jconf.Set("devfs_ruleset", 4)
+ jconf.Set("allow.raw_sockets", true)
+ jconf.Set("allow.mount", true)
+ jconf.Set("allow.mount.devfs", true)
+ jconf.Set("allow.mount.nullfs", true)
+ jconf.Set("allow.mount.fdescfs", true)
+ jconf.Set("securelevel", -1)
+ netjail, err := jail.Create(jconf)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ jconf := jail.NewConfig()
+ jconf.Set("persist", false)
+ err2 := netjail.Set(jconf)
+ if err2 != nil {
+ logrus.Errorf("error releasing vnet jail %q: %v", mynetns, err2)
+ }
+ }()
+ }
+
+ switch isolation {
+ case IsolationOCI:
+ var moreCreateArgs []string
+ if options.NoPivot {
+ moreCreateArgs = []string{"--no-pivot"}
+ } else {
+ moreCreateArgs = nil
+ }
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, containerName, b.Container, hostFile)
+ case IsolationChroot:
+ err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr)
+ default:
+ err = errors.New("don't know how to run this command")
+ }
+ return err
+}
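Condensing the vnet branch above: the jail is created with persist=true so it survives with no processes in it, the OCI runtime is executed with the jail as parent (via the org.freebsd.parentJail annotation), and flipping persist back to false is what removes it. A sketch of just that lifecycle, reusing the pkg/jail calls exactly as they appear above; the helper itself is hypothetical:

//go:build freebsd

package sketch

import "github.com/containers/buildah/pkg/jail"

// withVnetJail runs fn while a persistent vnet jail named name
// exists, then releases it, following the same pattern as Run().
func withVnetJail(name string, fn func() error) error {
	jconf := jail.NewConfig()
	jconf.Set("name", name)
	jconf.Set("vnet", jail.NEW) // give the jail its own network stack
	jconf.Set("persist", true)  // keep the jail alive with no processes
	j, err := jail.Create(jconf)
	if err != nil {
		return err
	}
	defer func() {
		release := jail.NewConfig()
		release.Set("persist", false) // dropping persist removes the empty jail
		_ = j.Set(release)
	}()
	return fn() // e.g. run the OCI runtime inside the jail
}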
+
+func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Generator) error {
+ defaultContainerConfig, err := config.Default()
+ if err != nil {
+ return fmt.Errorf("failed to get container config: %w", err)
+ }
+ // Other process resource limits
+ if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits); err != nil {
+ return err
+ }
+
+ logrus.Debugf("Resources: %#v", commonOpts)
+ return nil
+}
+
+// setupSpecialMountSpecChanges creates special mounts depending on
+// the namespaces - nothing yet for FreeBSD.
+func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Mount, error) {
+ return spec.Mounts, nil
+}
+
+func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, []string, error) {
+ return nil, nil, errors.New("cache mounts not supported on freebsd")
+}
+
+func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) {
+ // Make sure we can look up the container's directory before running.
+ _, err := b.store.ContainerDirectory(b.ContainerID)
+ if err != nil {
+ return nil, fmt.Errorf("error looking up container directory for %s: %w", b.ContainerID, err)
+ }
+
+ parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) {
+ var foundrw, foundro bool
+ for _, opt := range options {
+ switch opt {
+ case "rw":
+ foundrw = true
+ case "ro":
+ foundro = true
+ }
+ }
+ if !foundrw && !foundro {
+ options = append(options, "rw")
+ }
+ if mountType == "bind" || mountType == "rbind" {
+ mountType = "nullfs"
+ }
+ return specs.Mount{
+ Destination: container,
+ Type: mountType,
+ Source: host,
+ Options: options,
+ }, nil
+ }
+
+ // Bind mount volumes specified for this particular Run() invocation
+ for _, i := range optionMounts {
+ logrus.Debugf("setting up mounted volume at %q", i.Destination)
+ mount, err := parseMount(i.Type, i.Source, i.Destination, i.Options)
+ if err != nil {
+ return nil, err
+ }
+ mounts = append(mounts, mount)
+ }
+ // Bind mount volumes given by the user when the container was created
+ for _, i := range volumeMounts {
+ var options []string
+ spliti := strings.Split(i, ":")
+ if len(spliti) > 2 {
+ options = strings.Split(spliti[2], ",")
+ }
+ options = append(options, "bind")
+ mount, err := parseMount("bind", spliti[0], spliti[1], options)
+ if err != nil {
+ return nil, err
+ }
+ mounts = append(mounts, mount)
+ }
+ return mounts, nil
+}
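The translation above is the core of FreeBSD volume handling: split the host:container[:options] triple, default to read-write, and rewrite bind/rbind to nullfs, since FreeBSD loopback-mounts directories with nullfs(5) instead of Linux bind mounts. A self-contained restatement of that mapping; the sample paths are illustrative:

package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/runtime-spec/specs-go"
)

// volumeToMount mirrors the translation done in runSetupVolumeMounts
// above. It assumes the "host:container[:opt1,opt2]" form.
func volumeToMount(v string) specs.Mount {
	parts := strings.Split(v, ":")
	var options []string
	if len(parts) > 2 {
		options = strings.Split(parts[2], ",")
	}
	options = append(options, "bind")
	foundRWorRO := false
	for _, o := range options {
		if o == "rw" || o == "ro" {
			foundRWorRO = true
		}
	}
	if !foundRWorRO {
		options = append(options, "rw") // default to read-write, as above
	}
	return specs.Mount{
		Destination: parts[1],
		Type:        "nullfs", // FreeBSD's stand-in for a Linux bind mount
		Source:      parts[0],
		Options:     options,
	}
}

func main() {
	fmt.Printf("%+v\n", volumeToMount("/tmp/ctx:/src:ro"))
}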
+
+func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops []string) error {
+ return nil
+}
+
+func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) {
+ //if isolation == IsolationOCIRootless {
+ //return setupRootlessNetwork(pid)
+ //}
+
+ if len(configureNetworks) == 0 {
+ configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()}
+ }
+ logrus.Debugf("configureNetworks: %v", configureNetworks)
+
+ mynetns := containerName + "-vnet"
+
+ networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks))
+ for i, network := range configureNetworks {
+ networks[network] = nettypes.PerNetworkOptions{
+ InterfaceName: fmt.Sprintf("eth%d", i),
+ }
+ }
+
+ opts := nettypes.NetworkOptions{
+ ContainerID: containerName,
+ ContainerName: containerName,
+ Networks: networks,
+ }
+ _, err = b.NetworkInterface.Setup(mynetns, nettypes.SetupOptions{NetworkOptions: opts})
+ if err != nil {
+ return nil, nil, err
+ }
+
+ teardown = func() {
+ err := b.NetworkInterface.Teardown(mynetns, nettypes.TeardownOptions{NetworkOptions: opts})
+ if err != nil {
+ logrus.Errorf("failed to cleanup network: %v", err)
+ }
+ }
+
+ return teardown, nil, nil
+}
+
+func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) {
+ // Set namespace options in the container configuration.
+ for _, namespaceOption := range namespaceOptions {
+ switch namespaceOption.Name {
+ case string(specs.NetworkNamespace):
+ configureNetwork = false
+ if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) {
+ if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) {
+ configureNetworks = strings.Split(namespaceOption.Path, ",")
+ namespaceOption.Path = ""
+ }
+ configureNetwork = (policy != define.NetworkDisabled)
+ }
+ case string(specs.UTSNamespace):
+ configureUTS = false
+ if !namespaceOption.Host && namespaceOption.Path == "" {
+ configureUTS = true
+ }
+ }
+ // TODO: re-visit this when there is consensus on a
+ // FreeBSD runtime-spec. FreeBSD jails have rough
+ // equivalents for UTS and network namespaces.
+ }
+
+ return configureNetwork, configureNetworks, configureUTS, nil
+}
+
+func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) {
+ defaultNamespaceOptions, err := DefaultNamespaceOptions()
+ if err != nil {
+ return false, nil, err
+ }
+
+ namespaceOptions := defaultNamespaceOptions
+ namespaceOptions.AddOrReplace(b.NamespaceOptions...)
+ namespaceOptions.AddOrReplace(options.NamespaceOptions...)
+
+ networkPolicy := options.ConfigureNetwork
+ // Nothing was specified explicitly, so the network policy should be inherited from the builder.
+ if networkPolicy == NetworkDefault {
+ networkPolicy = b.ConfigureNetwork
+
+ // If the builder policy was NetworkDisabled and we want
+ // to disable the network for this run, reset
+ // options.ConfigureNetwork to NetworkDisabled, since it
+ // will be treated as the source of truth later.
+ if networkPolicy == NetworkDisabled {
+ options.ConfigureNetwork = networkPolicy
+ }
+ }
+
+ configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy)
+ if err != nil {
+ return false, nil, err
+ }
+
+ if configureUTS {
+ if options.Hostname != "" {
+ g.SetHostname(options.Hostname)
+ } else if b.Hostname() != "" {
+ g.SetHostname(b.Hostname())
+ } else {
+ g.SetHostname(stringid.TruncateID(b.ContainerID))
+ }
+ } else {
+ g.SetHostname("")
+ }
+
+ found := false
+ spec := g.Config
+ for i := range spec.Process.Env {
+ if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") {
+ found = true
+ break
+ }
+ }
+ if !found {
+ spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname))
+ }
+
+ return configureNetwork, configureNetworks, nil
+}
+
+func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) {
+ for dest, src := range bindFiles {
+ options := []string{}
+ if strings.HasPrefix(src, bundlePath) {
+ options = append(options, bind.NoBindOption)
+ }
+ mounts = append(mounts, specs.Mount{
+ Source: src,
+ Destination: dest,
+ Type: "nullfs",
+ Options: options,
+ })
+ }
+ return mounts
+}
+
+func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) error {
+ var (
+ ul *units.Ulimit
+ err error
+ )
+
+ ulimit = append(defaultUlimits, ulimit...)
+ for _, u := range ulimit {
+ if ul, err = units.ParseUlimit(u); err != nil {
+ return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err)
+ }
+
+ g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft))
+ }
+ return nil
+}
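addRlimits delegates the name=SOFT:HARD parsing to docker/go-units and then prefixes the upper-cased name with RLIMIT_. A quick illustration of what a typical value expands to (the numbers are arbitrary):

package main

import (
	"fmt"
	"strings"

	"github.com/docker/go-units"
)

func main() {
	// "nofile=1024:2048" parses to name "nofile", soft 1024,
	// hard 2048; addRlimits above registers it as RLIMIT_NOFILE.
	ul, err := units.ParseUlimit("nofile=1024:2048")
	if err != nil {
		panic(err)
	}
	fmt.Printf("RLIMIT_%s soft=%d hard=%d\n",
		strings.ToUpper(ul.Name), ul.Soft, ul.Hard)
}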
+
+// setPdeathsig sets a parent-death signal for the process
+func setPdeathsig(cmd *exec.Cmd) {
+ if cmd.SysProcAttr == nil {
+ cmd.SysProcAttr = &syscall.SysProcAttr{}
+ }
+ cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
+}
+
+// Create pipes to use for relaying stdio.
+func runMakeStdioPipe(uid, gid int) ([][]int, error) {
+ stdioPipe := make([][]int, 3)
+ for i := range stdioPipe {
+ stdioPipe[i] = make([]int, 2)
+ if err := unix.Pipe(stdioPipe[i]); err != nil {
+ return nil, fmt.Errorf("error creating pipe for container FD %d: %w", i, err)
+ }
+ }
+ return stdioPipe, nil
+}
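Both the FreeBSD runMakeStdioPipe above and the Linux variant further down return raw pipe fd pairs; callers then wrap the container-side ends in *os.File before handing them to the runtime, as the removed runUsingRuntime code below shows. A small sketch of that wrapping, assuming the same index convention (stream number, then read/write end):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// One pipe per stdio stream, exactly as in runMakeStdioPipe.
	stdio := make([][]int, 3)
	for i := range stdio {
		stdio[i] = make([]int, 2)
		if err := unix.Pipe(stdio[i]); err != nil {
			panic(err)
		}
	}
	// The container reads stdin from the read end and writes
	// stdout to the write end; the parent keeps the other ends
	// for relaying.
	childStdin := os.NewFile(uintptr(stdio[unix.Stdin][0]), "/dev/stdin")
	childStdout := os.NewFile(uintptr(stdio[unix.Stdout][1]), "/dev/stdout")
	fmt.Println(childStdin.Name(), childStdout.Name())
}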
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index 3d2a83f55..100e223f9 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -4,21 +4,15 @@
package buildah
import (
- "bytes"
- "encoding/json"
+ "context"
+ "errors"
"fmt"
- "io"
"io/ioutil"
- "net"
"os"
"os/exec"
- "os/signal"
"path/filepath"
- "runtime"
"strconv"
"strings"
- "sync"
- "sync/atomic"
"syscall"
"time"
@@ -28,43 +22,43 @@ import (
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
internalParse "github.com/containers/buildah/internal/parse"
- internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/parse"
- "github.com/containers/buildah/pkg/sshagent"
"github.com/containers/buildah/util"
- "github.com/containers/common/libnetwork/etchosts"
- "github.com/containers/common/libnetwork/network"
"github.com/containers/common/libnetwork/resolvconf"
nettypes "github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/capabilities"
"github.com/containers/common/pkg/chown"
"github.com/containers/common/pkg/config"
- "github.com/containers/common/pkg/subscriptions"
- imagetypes "github.com/containers/image/v5/types"
- "github.com/containers/storage"
+ "github.com/containers/common/pkg/hooks"
+ hooksExec "github.com/containers/common/pkg/hooks/exec"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
- "github.com/containers/storage/pkg/lockfile"
- "github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/unshare"
- storagetypes "github.com/containers/storage/types"
"github.com/docker/go-units"
- "github.com/opencontainers/go-digest"
"github.com/opencontainers/runtime-spec/specs-go"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
- "golang.org/x/term"
)
// ContainerDevices is an alias for a slice of github.com/opencontainers/runc/libcontainer/configs.Device structures.
type ContainerDevices define.ContainerDevices
+var (
+ // We don't want to remove destinations under /etc, /dev, /sys,
+ // or /proc, as the rootfs already contains these files and
+ // unionfs will create `whiteout` (i.e. `.wh`) files on removal
+ // of overlapping files from these directories. Everything
+ // other than these will be cleaned up.
+ nonCleanablePrefixes = []string{
+ "/etc", "/dev", "/sys", "/proc",
+ }
+)
+
func setChildProcess() error {
if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0); err != nil {
fmt.Fprintf(os.Stderr, "prctl(PR_SET_CHILD_SUBREAPER, 1): %v\n", err)
@@ -94,7 +88,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
gp, err := generate.New("linux")
if err != nil {
- return errors.Wrapf(err, "error generating new 'linux' runtime spec")
+ return fmt.Errorf("error generating new 'linux' runtime spec: %w", err)
}
g := &gp
@@ -113,7 +107,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
b.configureEnvironment(g, options, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"})
if b.CommonBuildOpts == nil {
- return errors.Errorf("Invalid format on container you must recreate the container")
+ return fmt.Errorf("invalid format on container you must recreate the container")
}
if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil {
@@ -128,7 +122,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
setupSelinux(g, b.ProcessLabel, b.MountLabel)
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {
- return errors.Wrapf(err, "error mounting container %q", b.ContainerID)
+ return fmt.Errorf("error mounting container %q: %w", b.ContainerID, err)
}
defer func() {
if err := b.Unmount(); err != nil {
@@ -317,6 +311,12 @@ rootless=%d
bindFiles["/run/.containerenv"] = containerenvPath
}
+ // Setup OCI hooks
+ _, err = b.setupOCIHooks(spec, (len(options.Mounts) > 0 || len(volumes) > 0))
+ if err != nil {
+ return fmt.Errorf("unable to setup OCI hooks: %w", err)
+ }
+
runMountInfo := runMountInfo{
ContextDir: options.ContextDir,
Secrets: options.Secrets,
@@ -327,7 +327,7 @@ rootless=%d
runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
if err != nil {
- return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID)
+ return fmt.Errorf("error resolving mountpoints for container %q: %w", b.ContainerID, err)
}
if runArtifacts.SSHAuthSock != "" {
sshenv := "SSH_AUTH_SOCK=" + runArtifacts.SSHAuthSock
@@ -367,11 +367,59 @@ rootless=%d
err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec,
mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile)
default:
- err = errors.Errorf("don't know how to run this command")
+ err = errors.New("don't know how to run this command")
}
return err
}
+func (b *Builder) setupOCIHooks(config *spec.Spec, hasVolumes bool) (map[string][]spec.Hook, error) {
+ allHooks := make(map[string][]spec.Hook)
+ if len(b.CommonBuildOpts.OCIHooksDir) == 0 {
+ if unshare.IsRootless() {
+ return nil, nil
+ }
+ for _, hDir := range []string{hooks.DefaultDir, hooks.OverrideDir} {
+ manager, err := hooks.New(context.Background(), []string{hDir}, []string{})
+ if err != nil {
+ if os.IsNotExist(err) {
+ continue
+ }
+ return nil, err
+ }
+ ociHooks, err := manager.Hooks(config, b.ImageAnnotations, hasVolumes)
+ if err != nil {
+ return nil, err
+ }
+ if len(ociHooks) > 0 || config.Hooks != nil {
+ logrus.Warnf("Implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load ociHooks from this directory", hDir)
+ }
+ for i, hook := range ociHooks {
+ allHooks[i] = hook
+ }
+ }
+ } else {
+ manager, err := hooks.New(context.Background(), b.CommonBuildOpts.OCIHooksDir, []string{})
+ if err != nil {
+ return nil, err
+ }
+
+ allHooks, err = manager.Hooks(config, b.ImageAnnotations, hasVolumes)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ hookErr, err := hooksExec.RuntimeConfigFilter(context.Background(), allHooks["precreate"], config, hooksExec.DefaultPostKillTimeout)
+ if err != nil {
+ logrus.Warnf("Container: precreate hook: %v", err)
+ if hookErr != nil && hookErr != err {
+ logrus.Debugf("container: precreate hook (hook error): %v", hookErr)
+ }
+ return nil, err
+ }
+ return allHooks, nil
+}
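setupOCIHooks reads hook definitions with containers/common's hooks package; each file in a hooks directory is a JSON document in the 1.0.0 schema, and the "precreate" stage consumed above is the one fed to RuntimeConfigFilter. A hedged example of what such a file might look like, with an illustrative path and matching condition:

{
    "version": "1.0.0",
    "hook": {
        "path": "/usr/local/bin/my-precreate-hook",
        "args": ["my-precreate-hook"]
    },
    "when": {
        "hasBindMounts": true
    },
    "stages": ["precreate"]
}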
+
func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Generator) error {
// Resources - CPU
if commonOpts.CPUPeriod != 0 {
@@ -405,7 +453,7 @@ func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Gene
defaultContainerConfig, err := config.Default()
if err != nil {
- return errors.Wrapf(err, "failed to get container config")
+ return fmt.Errorf("failed to get container config: %w", err)
}
// Other process resource limits
if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits); err != nil {
@@ -416,657 +464,6 @@ func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Gene
return nil
}
-func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
- var mounts []specs.Mount
- hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID}
- // Add temporary copies of the contents of volume locations at the
- // volume locations, unless we already have something there.
- for _, volume := range builtinVolumes {
- volumePath := filepath.Join(containerDir, "buildah-volumes", digest.Canonical.FromString(volume).Hex())
- initializeVolume := false
- // If we need to, create the directory that we'll use to hold
- // the volume contents. If we do need to create it, then we'll
- // need to populate it, too, so make a note of that.
- if _, err := os.Stat(volumePath); err != nil {
- if !os.IsNotExist(err) {
- return nil, err
- }
- logrus.Debugf("setting up built-in volume path at %q for %q", volumePath, volume)
- if err = os.MkdirAll(volumePath, 0755); err != nil {
- return nil, err
- }
- if err = label.Relabel(volumePath, mountLabel, false); err != nil {
- return nil, err
- }
- initializeVolume = true
- }
- // Make sure the volume exists in the rootfs and read its attributes.
- createDirPerms := os.FileMode(0755)
- err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, volume), copier.MkdirOptions{
- ChownNew: &hostOwner,
- ChmodNew: &createDirPerms,
- })
- if err != nil {
- return nil, errors.Wrapf(err, "ensuring volume path %q", filepath.Join(mountPoint, volume))
- }
- srcPath, err := copier.Eval(mountPoint, filepath.Join(mountPoint, volume), copier.EvalOptions{})
- if err != nil {
- return nil, errors.Wrapf(err, "evaluating path %q", srcPath)
- }
- stat, err := os.Stat(srcPath)
- if err != nil && !os.IsNotExist(err) {
- return nil, err
- }
- // If we need to populate the mounted volume's contents with
- // content from the rootfs, set it up now.
- if initializeVolume {
- if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil {
- return nil, err
- }
- if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil {
- return nil, err
- }
- logrus.Debugf("populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath)
- if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) {
- return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath)
- }
- }
- // Add the bind mount.
- mounts = append(mounts, specs.Mount{
- Source: volumePath,
- Destination: volume,
- Type: "bind",
- Options: []string{"bind"},
- })
- }
- return mounts, nil
-}
-
-func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, runFileMounts []string, runMountInfo runMountInfo) (*runMountArtifacts, error) {
- // Start building a new list of mounts.
- var mounts []specs.Mount
- haveMount := func(destination string) bool {
- for _, mount := range mounts {
- if mount.Destination == destination {
- // Already have something to mount there.
- return true
- }
- }
- return false
- }
-
- specMounts, err := setupSpecialMountSpecChanges(spec, b.CommonBuildOpts.ShmSize)
- if err != nil {
- return nil, err
- }
-
- // Get the list of files we need to bind into the container.
- bindFileMounts := runSetupBoundFiles(bundlePath, bindFiles)
-
- // After this point we need to know the per-container persistent storage directory.
- cdir, err := b.store.ContainerDirectory(b.ContainerID)
- if err != nil {
- return nil, errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID)
- }
-
- // Figure out which UID and GID to tell the subscriptions package to use
- // for files that it creates.
- rootUID, rootGID, err := util.GetHostRootIDs(spec)
- if err != nil {
- return nil, err
- }
-
- // Get host UID and GID of the container process.
- processUID, processGID, err := util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, spec.Process.User.UID, spec.Process.User.GID)
- if err != nil {
- return nil, err
- }
-
- // Get the list of subscriptions mounts.
- subscriptionMounts := subscriptions.MountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false)
-
- idMaps := IDMaps{
- uidmap: spec.Linux.UIDMappings,
- gidmap: spec.Linux.GIDMappings,
- rootUID: int(rootUID),
- rootGID: int(rootGID),
- processUID: int(processUID),
- processGID: int(processGID),
- }
- // Get the list of mounts that are just for this Run() call.
- runMounts, mountArtifacts, err := b.runSetupRunMounts(runFileMounts, runMountInfo, idMaps)
- if err != nil {
- return nil, err
- }
- // Add temporary copies of the contents of volume locations at the
- // volume locations, unless we already have something there.
- builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, int(rootUID), int(rootGID))
- if err != nil {
- return nil, err
- }
-
- // Get the list of explicitly-specified volume mounts.
- volumes, err := b.runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts, idMaps)
- if err != nil {
- return nil, err
- }
-
- // prepare list of mount destinations which can be cleaned up safely.
- // we can clean bindFiles, subscriptionMounts and specMounts
- // everything other than these might have users content
- mountArtifacts.RunMountTargets = append(append(append(mountArtifacts.RunMountTargets, cleanableDestinationListFromMounts(bindFileMounts)...), cleanableDestinationListFromMounts(subscriptionMounts)...), cleanableDestinationListFromMounts(specMounts)...)
-
- allMounts := util.SortMounts(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...))
- // Add them all, in the preferred order, except where they conflict with something that was previously added.
- for _, mount := range allMounts {
- if haveMount(mount.Destination) {
- // Already mounting something there, no need to bother with this one.
- continue
- }
- // Add the mount.
- mounts = append(mounts, mount)
- }
-
- // Set the list in the spec.
- spec.Mounts = mounts
- return mountArtifacts, nil
-}
-
-// Destinations which can be cleaned up after every RUN
-func cleanableDestinationListFromMounts(mounts []spec.Mount) []string {
- mountDest := []string{}
- for _, mount := range mounts {
- // Add all destination to mountArtifacts so that they can be cleaned up later
- if mount.Destination != "" {
- // we dont want to remove destinations with /etc, /dev, /sys, /proc as rootfs already contains these files
- // and unionfs will create a `whiteout` i.e `.wh` files on removal of overlapping files from these directories.
- // everything other than these will be cleanedup
- if !strings.HasPrefix(mount.Destination, "/etc") && !strings.HasPrefix(mount.Destination, "/dev") && !strings.HasPrefix(mount.Destination, "/sys") && !strings.HasPrefix(mount.Destination, "/proc") {
- mountDest = append(mountDest, mount.Destination)
- }
- }
- }
- return mountDest
-}
-
-// addResolvConf copies files from host and sets them up to bind mount into container
-func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string, namespaces []specs.LinuxNamespace) (string, error) {
- defaultConfig, err := config.Default()
- if err != nil {
- return "", errors.Wrapf(err, "failed to get config")
- }
-
- nameservers := make([]string, 0, len(defaultConfig.Containers.DNSServers)+len(dnsServers))
- nameservers = append(nameservers, defaultConfig.Containers.DNSServers...)
- nameservers = append(nameservers, dnsServers...)
-
- keepHostServers := false
- // special check for slirp ip
- if len(nameservers) == 0 && b.Isolation == IsolationOCIRootless {
- for _, ns := range namespaces {
- if ns.Type == specs.NetworkNamespace && ns.Path == "" {
- keepHostServers = true
- // if we are using slirp4netns, also add the built-in DNS server.
- logrus.Debugf("adding slirp4netns 10.0.2.3 built-in DNS server")
- nameservers = append([]string{"10.0.2.3"}, nameservers...)
- }
- }
- }
-
- searches := make([]string, 0, len(defaultConfig.Containers.DNSSearches)+len(dnsSearch))
- searches = append(searches, defaultConfig.Containers.DNSSearches...)
- searches = append(searches, dnsSearch...)
-
- options := make([]string, 0, len(defaultConfig.Containers.DNSOptions)+len(dnsOptions))
- options = append(options, defaultConfig.Containers.DNSOptions...)
- options = append(options, dnsOptions...)
-
- cfile := filepath.Join(rdir, "resolv.conf")
- if err := resolvconf.New(&resolvconf.Params{
- Path: cfile,
- Namespaces: namespaces,
- IPv6Enabled: true, // TODO we should check if we have ipv6
- KeepHostServers: keepHostServers,
- Nameservers: nameservers,
- Searches: searches,
- Options: options,
- }); err != nil {
- return "", errors.Wrapf(err, "error building resolv.conf for container %s", b.ContainerID)
- }
-
- uid := 0
- gid := 0
- if chownOpts != nil {
- uid = chownOpts.UID
- gid = chownOpts.GID
- }
- if err = os.Chown(cfile, uid, gid); err != nil {
- return "", err
- }
-
- if err := label.Relabel(cfile, b.MountLabel, false); err != nil {
- return "", err
- }
- return cfile, nil
-}
-
-// generateHosts creates a containers hosts file
-func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoot string) (string, error) {
- conf, err := config.Default()
- if err != nil {
- return "", err
- }
-
- path, err := etchosts.GetBaseHostFile(conf.Containers.BaseHostsFile, imageRoot)
- if err != nil {
- return "", err
- }
-
- targetfile := filepath.Join(rdir, "hosts")
- if err := etchosts.New(&etchosts.Params{
- BaseFile: path,
- ExtraHosts: b.CommonBuildOpts.AddHost,
- HostContainersInternalIP: etchosts.GetHostContainersInternalIP(conf, nil, nil),
- TargetFile: targetfile,
- }); err != nil {
- return "", err
- }
-
- uid := 0
- gid := 0
- if chownOpts != nil {
- uid = chownOpts.UID
- gid = chownOpts.GID
- }
- if err = os.Chown(targetfile, uid, gid); err != nil {
- return "", err
- }
- if err := label.Relabel(targetfile, b.MountLabel, false); err != nil {
- return "", err
- }
-
- return targetfile, nil
-}
-
-// generateHostname creates a containers /etc/hostname file
-func (b *Builder) generateHostname(rdir, hostname string, chownOpts *idtools.IDPair) (string, error) {
- var err error
- hostnamePath := "/etc/hostname"
-
- var hostnameBuffer bytes.Buffer
- hostnameBuffer.Write([]byte(fmt.Sprintf("%s\n", hostname)))
-
- cfile := filepath.Join(rdir, filepath.Base(hostnamePath))
- if err = ioutils.AtomicWriteFile(cfile, hostnameBuffer.Bytes(), 0644); err != nil {
- return "", errors.Wrapf(err, "error writing /etc/hostname into the container")
- }
-
- uid := 0
- gid := 0
- if chownOpts != nil {
- uid = chownOpts.UID
- gid = chownOpts.GID
- }
- if err = os.Chown(cfile, uid, gid); err != nil {
- return "", err
- }
- if err := label.Relabel(cfile, b.MountLabel, false); err != nil {
- return "", err
- }
-
- return cfile, nil
-}
-
-func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, terminalSize *specs.Box) {
- switch terminalPolicy {
- case DefaultTerminal:
- onTerminal := term.IsTerminal(unix.Stdin) && term.IsTerminal(unix.Stdout) && term.IsTerminal(unix.Stderr)
- if onTerminal {
- logrus.Debugf("stdio is a terminal, defaulting to using a terminal")
- } else {
- logrus.Debugf("stdio is not a terminal, defaulting to not using a terminal")
- }
- g.SetProcessTerminal(onTerminal)
- case WithTerminal:
- g.SetProcessTerminal(true)
- case WithoutTerminal:
- g.SetProcessTerminal(false)
- }
- if terminalSize != nil {
- g.SetProcessConsoleSize(terminalSize.Width, terminalSize.Height)
- }
-}
-
-func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string,
- containerCreateW io.WriteCloser, containerStartR io.ReadCloser) (wstatus unix.WaitStatus, err error) {
- if options.Logger == nil {
- options.Logger = logrus.StandardLogger()
- }
-
- // Lock the caller to a single OS-level thread.
- runtime.LockOSThread()
-
- // Set up bind mounts for things that a namespaced user might not be able to get to directly.
- unmountAll, err := bind.SetupIntermediateMountNamespace(spec, bundlePath)
- if unmountAll != nil {
- defer func() {
- if err := unmountAll(); err != nil {
- options.Logger.Error(err)
- }
- }()
- }
- if err != nil {
- return 1, err
- }
-
- // Write the runtime configuration.
- specbytes, err := json.Marshal(spec)
- if err != nil {
- return 1, errors.Wrapf(err, "error encoding configuration %#v as json", spec)
- }
- if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
- return 1, errors.Wrapf(err, "error storing runtime configuration")
- }
-
- logrus.Debugf("config = %v", string(specbytes))
-
- // Decide which runtime to use.
- runtime := options.Runtime
- if runtime == "" {
- runtime = util.Runtime()
- }
- localRuntime := util.FindLocalRuntime(runtime)
- if localRuntime != "" {
- runtime = localRuntime
- }
-
- // Default to just passing down our stdio.
- getCreateStdio := func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) {
- return os.Stdin, os.Stdout, os.Stderr
- }
-
- // Figure out how we're doing stdio handling, and create pipes and sockets.
- var stdio sync.WaitGroup
- var consoleListener *net.UnixListener
- var errorFds, closeBeforeReadingErrorFds []int
- stdioPipe := make([][]int, 3)
- copyConsole := false
- copyPipes := false
- finishCopy := make([]int, 2)
- if err = unix.Pipe(finishCopy); err != nil {
- return 1, errors.Wrapf(err, "error creating pipe for notifying to stop stdio")
- }
- finishedCopy := make(chan struct{}, 1)
- var pargs []string
- if spec.Process != nil {
- pargs = spec.Process.Args
- if spec.Process.Terminal {
- copyConsole = true
- // Create a listening socket for accepting the container's terminal's PTY master.
- socketPath := filepath.Join(bundlePath, "console.sock")
- consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"})
- if err != nil {
- return 1, errors.Wrapf(err, "error creating socket %q to receive terminal descriptor", consoleListener.Addr())
- }
- // Add console socket arguments.
- moreCreateArgs = append(moreCreateArgs, "--console-socket", socketPath)
- } else {
- copyPipes = true
- // Figure out who should own the pipes.
- uid, gid, err := util.GetHostRootIDs(spec)
- if err != nil {
- return 1, err
- }
- // Create stdio pipes.
- if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil {
- return 1, err
- }
- if err = runLabelStdioPipes(stdioPipe, spec.Process.SelinuxLabel, spec.Linux.MountLabel); err != nil {
- return 1, err
- }
- errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]}
- closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]}
- // Set stdio to our pipes.
- getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) {
- stdin := os.NewFile(uintptr(stdioPipe[unix.Stdin][0]), "/dev/stdin")
- stdout := os.NewFile(uintptr(stdioPipe[unix.Stdout][1]), "/dev/stdout")
- stderr := os.NewFile(uintptr(stdioPipe[unix.Stderr][1]), "/dev/stderr")
- return stdin, stdout, stderr
- }
- }
- } else {
- if options.Quiet {
- // Discard stdout.
- getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) {
- return os.Stdin, nil, os.Stderr
- }
- }
- }
-
- runtimeArgs := options.Args[:]
- if options.CgroupManager == config.SystemdCgroupsManager {
- runtimeArgs = append(runtimeArgs, "--systemd-cgroup")
- }
-
- // Build the commands that we'll execute.
- pidFile := filepath.Join(bundlePath, "pid")
- args := append(append(append(runtimeArgs, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName)
- create := exec.Command(runtime, args...)
- setPdeathsig(create)
- create.Dir = bundlePath
- stdin, stdout, stderr := getCreateStdio()
- create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr
-
- args = append(options.Args, "start", containerName)
- start := exec.Command(runtime, args...)
- setPdeathsig(start)
- start.Dir = bundlePath
- start.Stderr = os.Stderr
-
- kill := func(signal string) *exec.Cmd {
- args := append(options.Args, "kill", containerName)
- if signal != "" {
- args = append(args, signal)
- }
- kill := exec.Command(runtime, args...)
- kill.Dir = bundlePath
- kill.Stderr = os.Stderr
- return kill
- }
-
- args = append(options.Args, "delete", containerName)
- del := exec.Command(runtime, args...)
- del.Dir = bundlePath
- del.Stderr = os.Stderr
-
- // Actually create the container.
- logrus.Debugf("Running %q", create.Args)
- err = create.Run()
- if err != nil {
- return 1, errors.Wrapf(err, "error from %s creating container for %v: %s", runtime, pargs, runCollectOutput(options.Logger, errorFds, closeBeforeReadingErrorFds))
- }
- defer func() {
- err2 := del.Run()
- if err2 != nil {
- if err == nil {
- err = errors.Wrapf(err2, "error deleting container")
- } else {
- options.Logger.Infof("error from %s deleting container: %v", runtime, err2)
- }
- }
- }()
-
- // Make sure we read the container's exit status when it exits.
- pidValue, err := ioutil.ReadFile(pidFile)
- if err != nil {
- return 1, err
- }
- pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue)))
- if err != nil {
- return 1, errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue))
- }
- var stopped uint32
- var reaping sync.WaitGroup
- reaping.Add(1)
- go func() {
- defer reaping.Done()
- var err error
- _, err = unix.Wait4(pid, &wstatus, 0, nil)
- if err != nil {
- wstatus = 0
- options.Logger.Errorf("error waiting for container child process %d: %v\n", pid, err)
- }
- atomic.StoreUint32(&stopped, 1)
- }()
-
- if configureNetwork {
- if _, err := containerCreateW.Write([]byte{1}); err != nil {
- return 1, err
- }
- containerCreateW.Close()
- logrus.Debug("waiting for parent start message")
- b := make([]byte, 1)
- if _, err := containerStartR.Read(b); err != nil {
- return 1, errors.Wrap(err, "did not get container start message from parent")
- }
- containerStartR.Close()
- }
-
- if copyPipes {
- // We don't need the ends of the pipes that belong to the container.
- stdin.Close()
- if stdout != nil {
- stdout.Close()
- }
- stderr.Close()
- }
-
- // Handle stdio for the container in the background.
- stdio.Add(1)
- go runCopyStdio(options.Logger, &stdio, copyPipes, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy, spec)
-
- // Start the container.
- logrus.Debugf("Running %q", start.Args)
- err = start.Run()
- if err != nil {
- return 1, errors.Wrapf(err, "error from %s starting container", runtime)
- }
- defer func() {
- if atomic.LoadUint32(&stopped) == 0 {
- if err := kill("").Run(); err != nil {
- options.Logger.Infof("error from %s stopping container: %v", runtime, err)
- }
- atomic.StoreUint32(&stopped, 1)
- }
- }()
-
- // Wait for the container to exit.
- interrupted := make(chan os.Signal, 100)
- go func() {
- for range interrupted {
- if err := kill("SIGKILL").Run(); err != nil {
- logrus.Errorf("%v sending SIGKILL", err)
- }
- }
- }()
- signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
- for {
- now := time.Now()
- var state specs.State
- args = append(options.Args, "state", containerName)
- stat := exec.Command(runtime, args...)
- stat.Dir = bundlePath
- stat.Stderr = os.Stderr
- stateOutput, err := stat.Output()
- if err != nil {
- if atomic.LoadUint32(&stopped) != 0 {
- // container exited
- break
- }
- return 1, errors.Wrapf(err, "error reading container state from %s (got output: %q)", runtime, string(stateOutput))
- }
- if err = json.Unmarshal(stateOutput, &state); err != nil {
- return 1, errors.Wrapf(err, "error parsing container state %q from %s", string(stateOutput), runtime)
- }
- switch state.Status {
- case "running":
- case "stopped":
- atomic.StoreUint32(&stopped, 1)
- default:
- return 1, errors.Errorf("container status unexpectedly changed to %q", state.Status)
- }
- if atomic.LoadUint32(&stopped) != 0 {
- break
- }
- select {
- case <-finishedCopy:
- atomic.StoreUint32(&stopped, 1)
- case <-time.After(time.Until(now.Add(100 * time.Millisecond))):
- continue
- }
- if atomic.LoadUint32(&stopped) != 0 {
- break
- }
- }
- signal.Stop(interrupted)
- close(interrupted)
-
- // Close the writing end of the stop-handling-stdio notification pipe.
- unix.Close(finishCopy[1])
- // Wait for the stdio copy goroutine to flush.
- stdio.Wait()
- // Wait until we finish reading the exit status.
- reaping.Wait()
-
- return wstatus, nil
-}
-
-func runCollectOutput(logger *logrus.Logger, fds, closeBeforeReadingFds []int) string { //nolint:interfacer
- for _, fd := range closeBeforeReadingFds {
- unix.Close(fd)
- }
- var b bytes.Buffer
- buf := make([]byte, 8192)
- for _, fd := range fds {
- nread, err := unix.Read(fd, buf)
- if err != nil {
- if errno, isErrno := err.(syscall.Errno); isErrno {
- switch errno {
- default:
- logger.Errorf("error reading from pipe %d: %v", fd, err)
- case syscall.EINTR, syscall.EAGAIN:
- }
- } else {
- logger.Errorf("unable to wait for data from pipe %d: %v", fd, err)
- }
- continue
- }
- for nread > 0 {
- r := buf[:nread]
- if nwritten, err := b.Write(r); err != nil || nwritten != len(r) {
- if nwritten != len(r) {
- logger.Errorf("error buffering data from pipe %d: %v", fd, err)
- break
- }
- }
- nread, err = unix.Read(fd, buf)
- if err != nil {
- if errno, isErrno := err.(syscall.Errno); isErrno {
- switch errno {
- default:
- logger.Errorf("error reading from pipe %d: %v", fd, err)
- case syscall.EINTR, syscall.EAGAIN:
- }
- } else {
- logger.Errorf("unable to wait for data from pipe %d: %v", fd, err)
- }
- break
- }
- }
- }
- return b.String()
-}
-
func setupRootlessNetwork(pid int) (teardown func(), err error) {
slirp4netns, err := exec.LookPath("slirp4netns")
if err != nil {
@@ -1075,19 +472,19 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) {
rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe()
if err != nil {
- return nil, errors.Wrapf(err, "cannot create slirp4netns sync pipe")
+ return nil, fmt.Errorf("cannot create slirp4netns sync pipe: %w", err)
}
defer rootlessSlirpSyncR.Close()
// Be sure there are no fds inherited to slirp4netns except the sync pipe
files, err := ioutil.ReadDir("/proc/self/fd")
if err != nil {
- return nil, errors.Wrapf(err, "cannot list open fds")
+ return nil, fmt.Errorf("cannot list open fds: %w", err)
}
for _, f := range files {
fd, err := strconv.Atoi(f.Name())
if err != nil {
- return nil, errors.Wrapf(err, "cannot parse fd")
+ return nil, fmt.Errorf("cannot parse fd: %w", err)
}
if fd == int(rootlessSlirpSyncW.Fd()) {
continue
@@ -1103,13 +500,13 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) {
err = cmd.Start()
rootlessSlirpSyncW.Close()
if err != nil {
- return nil, errors.Wrapf(err, "cannot start slirp4netns")
+ return nil, fmt.Errorf("cannot start slirp4netns: %w", err)
}
b := make([]byte, 1)
for {
if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil {
- return nil, errors.Wrapf(err, "error setting slirp4netns pipe timeout")
+ return nil, fmt.Errorf("error setting slirp4netns pipe timeout: %w", err)
}
if _, err := rootlessSlirpSyncR.Read(b); err == nil {
break
@@ -1119,7 +516,7 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) {
var status syscall.WaitStatus
_, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil)
if err != nil {
- return nil, errors.Wrapf(err, "failed to read slirp4netns process status")
+ return nil, fmt.Errorf("failed to read slirp4netns process status: %w", err)
}
if status.Exited() || status.Signaled() {
return nil, errors.New("slirp4netns failed")
@@ -1127,7 +524,7 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) {
continue
}
- return nil, errors.Wrapf(err, "failed to read from slirp4netns sync pipe")
+ return nil, fmt.Errorf("failed to read from slirp4netns sync pipe: %w", err)
}
}
@@ -1155,7 +552,7 @@ func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, optio
netns := fmt.Sprintf("/proc/%d/ns/net", pid)
netFD, err := unix.Open(netns, unix.O_RDONLY, 0)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error opening network namespace")
+ return nil, nil, fmt.Errorf("error opening network namespace: %w", err)
}
mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD)
@@ -1186,393 +583,27 @@ func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, optio
return teardown, netStatus, nil
}
-func setNonblock(logger *logrus.Logger, fd int, description string, nonblocking bool) (bool, error) { //nolint:interfacer
- mask, err := unix.FcntlInt(uintptr(fd), unix.F_GETFL, 0)
- if err != nil {
- return false, err
- }
- blocked := mask&unix.O_NONBLOCK == 0
-
- if err := unix.SetNonblock(fd, nonblocking); err != nil {
- if nonblocking {
- logger.Errorf("error setting %s to nonblocking: %v", description, err)
- } else {
- logger.Errorf("error setting descriptor %s blocking: %v", description, err)
- }
- }
- return blocked, err
-}
-
-func runCopyStdio(logger *logrus.Logger, stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec) {
- defer func() {
- unix.Close(finishCopy[0])
- if copyPipes {
- unix.Close(stdioPipe[unix.Stdin][1])
- unix.Close(stdioPipe[unix.Stdout][0])
- unix.Close(stdioPipe[unix.Stderr][0])
- }
- stdio.Done()
- finishedCopy <- struct{}{}
- close(finishedCopy)
- }()
- // Map describing where data on an incoming descriptor should go.
- relayMap := make(map[int]int)
- // Map describing incoming and outgoing descriptors.
- readDesc := make(map[int]string)
- writeDesc := make(map[int]string)
- // Buffers.
- relayBuffer := make(map[int]*bytes.Buffer)
- // Set up the terminal descriptor or pipes for polling.
- if copyConsole {
- // Accept a connection over our listening socket.
- fd, err := runAcceptTerminal(logger, consoleListener, spec.Process.ConsoleSize)
- if err != nil {
- logger.Errorf("%v", err)
- return
- }
- terminalFD := fd
- // Input from our stdin, output from the terminal descriptor.
- relayMap[unix.Stdin] = terminalFD
- readDesc[unix.Stdin] = "stdin"
- relayBuffer[terminalFD] = new(bytes.Buffer)
- writeDesc[terminalFD] = "container terminal input"
- relayMap[terminalFD] = unix.Stdout
- readDesc[terminalFD] = "container terminal output"
- relayBuffer[unix.Stdout] = new(bytes.Buffer)
- writeDesc[unix.Stdout] = "output"
- // Set our terminal's mode to raw, to pass handling of special
- // terminal input to the terminal in the container.
- if term.IsTerminal(unix.Stdin) {
- if state, err := term.MakeRaw(unix.Stdin); err != nil {
- logger.Warnf("error setting terminal state: %v", err)
- } else {
- defer func() {
- if err = term.Restore(unix.Stdin, state); err != nil {
- logger.Errorf("unable to restore terminal state: %v", err)
- }
- }()
- }
- }
- }
- if copyPipes {
- // Input from our stdin, output from the stdout and stderr pipes.
- relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1]
- readDesc[unix.Stdin] = "stdin"
- relayBuffer[stdioPipe[unix.Stdin][1]] = new(bytes.Buffer)
- writeDesc[stdioPipe[unix.Stdin][1]] = "container stdin"
- relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout
- readDesc[stdioPipe[unix.Stdout][0]] = "container stdout"
- relayBuffer[unix.Stdout] = new(bytes.Buffer)
- writeDesc[unix.Stdout] = "stdout"
- relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr
- readDesc[stdioPipe[unix.Stderr][0]] = "container stderr"
- relayBuffer[unix.Stderr] = new(bytes.Buffer)
- writeDesc[unix.Stderr] = "stderr"
- }
- // Set our reading descriptors to non-blocking.
- for rfd, wfd := range relayMap {
- blocked, err := setNonblock(logger, rfd, readDesc[rfd], true)
- if err != nil {
- return
- }
- if blocked {
- defer setNonblock(logger, rfd, readDesc[rfd], false) // nolint:errcheck
- }
- setNonblock(logger, wfd, writeDesc[wfd], false) // nolint:errcheck
- }
-
- if copyPipes {
- setNonblock(logger, stdioPipe[unix.Stdin][1], writeDesc[stdioPipe[unix.Stdin][1]], true) // nolint:errcheck
- }
-
- runCopyStdioPassData(copyPipes, stdioPipe, finishCopy, relayMap, relayBuffer, readDesc, writeDesc)
-}
-
-func canRetry(err error) bool {
- if errno, isErrno := err.(syscall.Errno); isErrno {
- return errno == syscall.EINTR || errno == syscall.EAGAIN
- }
- return false
-}
-
-func runCopyStdioPassData(copyPipes bool, stdioPipe [][]int, finishCopy []int, relayMap map[int]int, relayBuffer map[int]*bytes.Buffer, readDesc map[int]string, writeDesc map[int]string) {
- closeStdin := false
-
- // Pass data back and forth.
- pollTimeout := -1
- for len(relayMap) > 0 {
- // Start building the list of descriptors to poll.
- pollFds := make([]unix.PollFd, 0, len(relayMap)+1)
- // Poll for a notification that we should stop handling stdio.
- pollFds = append(pollFds, unix.PollFd{Fd: int32(finishCopy[0]), Events: unix.POLLIN | unix.POLLHUP})
- // Poll on our reading descriptors.
- for rfd := range relayMap {
- pollFds = append(pollFds, unix.PollFd{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP})
- }
- buf := make([]byte, 8192)
- // Wait for new data from any input descriptor, or a notification that we're done.
- _, err := unix.Poll(pollFds, pollTimeout)
- if !util.LogIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) {
- return
- }
- removes := make(map[int]struct{})
- for _, pollFd := range pollFds {
- // If this descriptor's just been closed from the other end, mark it for
- // removal from the set that we're checking for.
- if pollFd.Revents&unix.POLLHUP == unix.POLLHUP {
- removes[int(pollFd.Fd)] = struct{}{}
- }
- // If the descriptor was closed elsewhere, remove it from our list.
- if pollFd.Revents&unix.POLLNVAL != 0 {
- logrus.Debugf("error polling descriptor %s: closed?", readDesc[int(pollFd.Fd)])
- removes[int(pollFd.Fd)] = struct{}{}
- }
- // If the POLLIN flag isn't set, then there's no data to be read from this descriptor.
- if pollFd.Revents&unix.POLLIN == 0 {
- continue
- }
- // Read whatever there is to be read.
- readFD := int(pollFd.Fd)
- writeFD, needToRelay := relayMap[readFD]
- if needToRelay {
- n, err := unix.Read(readFD, buf)
- if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) {
- return
- }
- // If it's zero-length on our stdin and we're
- // using pipes, it's an EOF, so close the stdin
- // pipe's writing end.
- if n == 0 && !canRetry(err) && int(pollFd.Fd) == unix.Stdin {
- removes[int(pollFd.Fd)] = struct{}{}
- } else if n > 0 {
- // Buffer the data in case we get blocked on where they need to go.
- nwritten, err := relayBuffer[writeFD].Write(buf[:n])
- if err != nil {
- logrus.Debugf("buffer: %v", err)
- continue
- }
- if nwritten != n {
- logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", n, nwritten)
- continue
- }
- // If this is the last of the data we'll be able to read from this
- // descriptor, read all that there is to read.
- for pollFd.Revents&unix.POLLHUP == unix.POLLHUP {
- nr, err := unix.Read(readFD, buf)
- util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err))
- if nr <= 0 {
- break
- }
- nwritten, err := relayBuffer[writeFD].Write(buf[:nr])
- if err != nil {
- logrus.Debugf("buffer: %v", err)
- break
- }
- if nwritten != nr {
- logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten)
- break
- }
- }
- }
- }
- }
- // Try to drain the output buffers. Set the default timeout
- // for the next poll() to 100ms if we still have data to write.
- pollTimeout = -1
- for writeFD := range relayBuffer {
- if relayBuffer[writeFD].Len() > 0 {
- n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes())
- if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) {
- return
- }
- if n > 0 {
- relayBuffer[writeFD].Next(n)
- }
- if closeStdin && writeFD == stdioPipe[unix.Stdin][1] && stdioPipe[unix.Stdin][1] >= 0 && relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 {
- logrus.Debugf("closing stdin")
- unix.Close(stdioPipe[unix.Stdin][1])
- stdioPipe[unix.Stdin][1] = -1
- }
- }
- if relayBuffer[writeFD].Len() > 0 {
- pollTimeout = 100
- }
- }
- // Remove any descriptors which we don't need to poll any more from the poll descriptor list.
- for remove := range removes {
- if copyPipes && remove == unix.Stdin {
- closeStdin = true
- if relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 {
- logrus.Debugf("closing stdin")
- unix.Close(stdioPipe[unix.Stdin][1])
- stdioPipe[unix.Stdin][1] = -1
- }
- }
- delete(relayMap, remove)
- }
- // If the we-can-return pipe had anything for us, we're done.
- for _, pollFd := range pollFds {
- if int(pollFd.Fd) == finishCopy[0] && pollFd.Revents != 0 {
- // The pipe is closed, indicating that we can stop now.
- return
- }
- }
- }
-}
-
-func runAcceptTerminal(logger *logrus.Logger, consoleListener *net.UnixListener, terminalSize *specs.Box) (int, error) {
- defer consoleListener.Close()
- c, err := consoleListener.AcceptUnix()
- if err != nil {
- return -1, errors.Wrapf(err, "error accepting socket descriptor connection")
- }
- defer c.Close()
- // Expect a control message over our new connection.
- b := make([]byte, 8192)
- oob := make([]byte, 8192)
- n, oobn, _, _, err := c.ReadMsgUnix(b, oob)
- if err != nil {
- return -1, errors.Wrapf(err, "error reading socket descriptor")
- }
- if n > 0 {
- logrus.Debugf("socket descriptor is for %q", string(b[:n]))
- }
- if oobn > len(oob) {
- return -1, errors.Errorf("too much out-of-bounds data (%d bytes)", oobn)
- }
- // Parse the control message.
- scm, err := unix.ParseSocketControlMessage(oob[:oobn])
- if err != nil {
- return -1, errors.Wrapf(err, "error parsing out-of-bound data as a socket control message")
- }
- logrus.Debugf("control messages: %v", scm)
- // Expect to get a descriptor.
- terminalFD := -1
- for i := range scm {
- fds, err := unix.ParseUnixRights(&scm[i])
- if err != nil {
- return -1, errors.Wrapf(err, "error parsing unix rights control message: %v", &scm[i])
- }
- logrus.Debugf("fds: %v", fds)
- if len(fds) == 0 {
- continue
- }
- terminalFD = fds[0]
- break
- }
- if terminalFD == -1 {
- return -1, errors.Errorf("unable to read terminal descriptor")
- }
- // Set the pseudoterminal's size to the configured size, or our own.
- winsize := &unix.Winsize{}
- if terminalSize != nil {
- // Use configured sizes.
- winsize.Row = uint16(terminalSize.Height)
- winsize.Col = uint16(terminalSize.Width)
- } else {
- if term.IsTerminal(unix.Stdin) {
- // Use the size of our terminal.
- if winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ); err != nil {
- logger.Warnf("error reading size of controlling terminal: %v", err)
- winsize.Row = 0
- winsize.Col = 0
- }
- }
- }
- if winsize.Row != 0 && winsize.Col != 0 {
- if err = unix.IoctlSetWinsize(terminalFD, unix.TIOCSWINSZ, winsize); err != nil {
- logger.Warnf("error setting size of container pseudoterminal: %v", err)
- }
- // FIXME - if we're connected to a terminal, we should
- // be passing the updated terminal size down when we
- // receive a SIGWINCH.
- }
- return terminalFD, nil
-}
-
// Create pipes to use for relaying stdio.
func runMakeStdioPipe(uid, gid int) ([][]int, error) {
stdioPipe := make([][]int, 3)
for i := range stdioPipe {
stdioPipe[i] = make([]int, 2)
if err := unix.Pipe(stdioPipe[i]); err != nil {
- return nil, errors.Wrapf(err, "error creating pipe for container FD %d", i)
+ return nil, fmt.Errorf("error creating pipe for container FD %d: %w", i, err)
}
}
if err := unix.Fchown(stdioPipe[unix.Stdin][0], uid, gid); err != nil {
- return nil, errors.Wrapf(err, "error setting owner of stdin pipe descriptor")
+ return nil, fmt.Errorf("error setting owner of stdin pipe descriptor: %w", err)
}
if err := unix.Fchown(stdioPipe[unix.Stdout][1], uid, gid); err != nil {
- return nil, errors.Wrapf(err, "error setting owner of stdout pipe descriptor")
+ return nil, fmt.Errorf("error setting owner of stdout pipe descriptor: %w", err)
}
if err := unix.Fchown(stdioPipe[unix.Stderr][1], uid, gid); err != nil {
- return nil, errors.Wrapf(err, "error setting owner of stderr pipe descriptor")
+ return nil, fmt.Errorf("error setting owner of stderr pipe descriptor: %w", err)
}
return stdioPipe, nil
}
-func runUsingRuntimeMain() {
- var options runUsingRuntimeSubprocOptions
- // Set logging.
- if level := os.Getenv("LOGLEVEL"); level != "" {
- if ll, err := strconv.Atoi(level); err == nil {
- logrus.SetLevel(logrus.Level(ll))
- }
- }
- // Unpack our configuration.
- confPipe := os.NewFile(3, "confpipe")
- if confPipe == nil {
- fmt.Fprintf(os.Stderr, "error reading options pipe\n")
- os.Exit(1)
- }
- defer confPipe.Close()
- if err := json.NewDecoder(confPipe).Decode(&options); err != nil {
- fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err)
- os.Exit(1)
- }
- // Set ourselves up to read the container's exit status. We're doing this in a child process
- // so that we won't mess with the setting in a caller of the library.
- if err := setChildProcess(); err != nil {
- os.Exit(1)
- }
- ospec := options.Spec
- if ospec == nil {
- fmt.Fprintf(os.Stderr, "options spec not specified\n")
- os.Exit(1)
- }
-
- // open the pipes used to communicate with the parent process
- var containerCreateW *os.File
- var containerStartR *os.File
- if options.ConfigureNetwork {
- containerCreateW = os.NewFile(4, "containercreatepipe")
- if containerCreateW == nil {
- fmt.Fprintf(os.Stderr, "could not open fd 4\n")
- os.Exit(1)
- }
- containerStartR = os.NewFile(5, "containerstartpipe")
- if containerStartR == nil {
- fmt.Fprintf(os.Stderr, "could not open fd 5\n")
- os.Exit(1)
- }
- }
-
- // Run the container, start to finish.
- status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName, containerCreateW, containerStartR)
- if err != nil {
- fmt.Fprintf(os.Stderr, "error running container: %v\n", err)
- os.Exit(1)
- }
- // Pass the container's exit status back to the caller by exiting with the same status.
- if status.Exited() {
- os.Exit(status.ExitStatus())
- } else if status.Signaled() {
- fmt.Fprintf(os.Stderr, "container exited on %s\n", status.Signal())
- os.Exit(1)
- }
- os.Exit(1)
-}
-
func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) {
// Set namespace options in the container configuration.
configureUserns := false
@@ -1602,20 +633,20 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
}
if namespaceOption.Host {
if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil {
- return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", namespaceOption.Name)
+ return false, nil, false, fmt.Errorf("error removing %q namespace for run: %w", namespaceOption.Name, err)
}
} else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil {
if namespaceOption.Path == "" {
- return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", namespaceOption.Name)
+ return false, nil, false, fmt.Errorf("error adding new %q namespace for run: %w", namespaceOption.Name, err)
}
- return false, nil, false, errors.Wrapf(err, "error adding %q namespace %q for run", namespaceOption.Name, namespaceOption.Path)
+ return false, nil, false, fmt.Errorf("error adding %q namespace %q for run: %w", namespaceOption.Name, namespaceOption.Path, err)
}
}
// If we've got mappings, we're going to have to create a user namespace.
if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns {
if err := g.AddOrReplaceLinuxNamespace(string(specs.UserNamespace), ""); err != nil {
- return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.UserNamespace))
+ return false, nil, false, fmt.Errorf("error adding new %q namespace for run: %w", string(specs.UserNamespace), err)
}
hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("")
if err != nil {
@@ -1639,17 +670,17 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
}
if !specifiedNetwork {
if err := g.AddOrReplaceLinuxNamespace(string(specs.NetworkNamespace), ""); err != nil {
- return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.NetworkNamespace))
+ return false, nil, false, fmt.Errorf("error adding new %q namespace for run: %w", string(specs.NetworkNamespace), err)
}
configureNetwork = (policy != define.NetworkDisabled)
}
} else {
if err := g.RemoveLinuxNamespace(string(specs.UserNamespace)); err != nil {
- return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.UserNamespace))
+ return false, nil, false, fmt.Errorf("error removing %q namespace for run: %w", string(specs.UserNamespace), err)
}
if !specifiedNetwork {
if err := g.RemoveLinuxNamespace(string(specs.NetworkNamespace)); err != nil {
- return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.NetworkNamespace))
+ return false, nil, false, fmt.Errorf("error removing %q namespace for run: %w", string(specs.NetworkNamespace), err)
}
}
}
@@ -1753,7 +784,7 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string)
ulimit = append(defaultUlimits, ulimit...)
for _, u := range ulimit {
if ul, err = units.ParseUlimit(u); err != nil {
- return errors.Wrapf(err, "ulimit option %q requires name=SOFT:HARD, failed to be parsed", u)
+ return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err)
}
g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft))
@@ -1761,25 +792,14 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string)
return nil
}
-func (b *Builder) cleanupTempVolumes() {
- for tempVolume, val := range b.TempVolumes {
- if val {
- if err := overlay.RemoveTemp(tempVolume); err != nil {
-				b.Logger.Errorf("%v", err)
- }
- b.TempVolumes[tempVolume] = false
- }
- }
-}
-
func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) {
// Make sure the overlay directory is clean before running
containerDir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up container directory for %s", b.ContainerID)
+ return nil, fmt.Errorf("error looking up container directory for %s: %w", b.ContainerID, err)
}
if err := overlay.CleanupContent(containerDir); err != nil {
- return nil, errors.Wrapf(err, "error cleaning up overlay content for %s", b.ContainerID)
+ return nil, fmt.Errorf("error cleaning up overlay content for %s: %w", b.ContainerID, err)
}
parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) {
@@ -1846,7 +866,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
contentDir, err := overlay.TempDir(containerDir, idMaps.rootUID, idMaps.rootGID)
if err != nil {
- return specs.Mount{}, errors.Wrapf(err, "failed to create TempDir in the %s directory", containerDir)
+ return specs.Mount{}, fmt.Errorf("failed to create TempDir in the %s directory: %w", containerDir, err)
}
overlayOpts := overlay.Options{
@@ -1946,16 +966,16 @@ func setupReadOnlyPaths(g *generate.Generator) {
func setupCapAdd(g *generate.Generator, caps ...string) error {
for _, cap := range caps {
if err := g.AddProcessCapabilityBounding(cap); err != nil {
- return errors.Wrapf(err, "error adding %q to the bounding capability set", cap)
+ return fmt.Errorf("error adding %q to the bounding capability set: %w", cap, err)
}
if err := g.AddProcessCapabilityEffective(cap); err != nil {
- return errors.Wrapf(err, "error adding %q to the effective capability set", cap)
+ return fmt.Errorf("error adding %q to the effective capability set: %w", cap, err)
}
if err := g.AddProcessCapabilityPermitted(cap); err != nil {
- return errors.Wrapf(err, "error adding %q to the permitted capability set", cap)
+ return fmt.Errorf("error adding %q to the permitted capability set: %w", cap, err)
}
if err := g.AddProcessCapabilityAmbient(cap); err != nil {
- return errors.Wrapf(err, "error adding %q to the ambient capability set", cap)
+ return fmt.Errorf("error adding %q to the ambient capability set: %w", cap, err)
}
}
return nil
@@ -1964,16 +984,16 @@ func setupCapAdd(g *generate.Generator, caps ...string) error {
func setupCapDrop(g *generate.Generator, caps ...string) error {
for _, cap := range caps {
if err := g.DropProcessCapabilityBounding(cap); err != nil {
- return errors.Wrapf(err, "error removing %q from the bounding capability set", cap)
+ return fmt.Errorf("error removing %q from the bounding capability set: %w", cap, err)
}
if err := g.DropProcessCapabilityEffective(cap); err != nil {
- return errors.Wrapf(err, "error removing %q from the effective capability set", cap)
+ return fmt.Errorf("error removing %q from the effective capability set: %w", cap, err)
}
if err := g.DropProcessCapabilityPermitted(cap); err != nil {
- return errors.Wrapf(err, "error removing %q from the permitted capability set", cap)
+ return fmt.Errorf("error removing %q from the permitted capability set: %w", cap, err)
}
if err := g.DropProcessCapabilityAmbient(cap); err != nil {
- return errors.Wrapf(err, "error removing %q from the ambient capability set", cap)
+ return fmt.Errorf("error removing %q from the ambient capability set: %w", cap, err)
}
}
return nil
@@ -2002,97 +1022,6 @@ func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops [
return setupCapDrop(g, drops...)
}
-// Search for a command that isn't given as an absolute path using the $PATH
-// under the rootfs. We can't resolve absolute symbolic links without
-// chroot()ing, which we may not be able to do, so just accept a link as a
-// valid resolution.
-func runLookupPath(g *generate.Generator, command []string) []string {
- // Look for the configured $PATH.
- spec := g.Config
- envPath := ""
- for i := range spec.Process.Env {
- if strings.HasPrefix(spec.Process.Env[i], "PATH=") {
- envPath = spec.Process.Env[i]
- }
- }
- // If there is no configured $PATH, supply one.
- if envPath == "" {
- defaultPath := "/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin"
- envPath = "PATH=" + defaultPath
- g.AddProcessEnv("PATH", defaultPath)
- }
- // No command, nothing to do.
- if len(command) == 0 {
- return command
- }
- // Command is already an absolute path, use it as-is.
- if filepath.IsAbs(command[0]) {
- return command
- }
- // For each element in the PATH,
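-	// (envPath[5:] strips the leading "PATH=" prefix),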
- for _, pathEntry := range filepath.SplitList(envPath[5:]) {
- // if it's the empty string, it's ".", which is the Cwd,
- if pathEntry == "" {
- pathEntry = spec.Process.Cwd
- }
- // build the absolute path which it might be,
- candidate := filepath.Join(pathEntry, command[0])
- // check if it's there,
- if fi, err := os.Lstat(filepath.Join(spec.Root.Path, candidate)); fi != nil && err == nil {
- // and if it's not a directory, and either a symlink or executable,
- if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0111 != 0)) {
- // use that.
- return append([]string{candidate}, command[1:]...)
- }
- }
- }
- return command
-}
-
-func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) (string, error) {
- // Set the user UID/GID/supplemental group list/capabilities lists.
- user, homeDir, err := b.userForRun(mountPoint, options.User)
- if err != nil {
- return "", err
- }
- if err := setupCapabilities(g, b.Capabilities, options.AddCapabilities, options.DropCapabilities); err != nil {
- return "", err
- }
- g.SetProcessUID(user.UID)
- g.SetProcessGID(user.GID)
- for _, gid := range user.AdditionalGids {
- g.AddProcessAdditionalGid(gid)
- }
-
-	// If not running as root, remove all capabilities except the Bounding set
- if user.UID != 0 {
- bounding := g.Config.Process.Capabilities.Bounding
- g.ClearProcessCapabilities()
- g.Config.Process.Capabilities.Bounding = bounding
- }
-
- return homeDir, nil
-}
-
-func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions, defaultEnv []string) {
- g.ClearProcessEnv()
-
- if b.CommonBuildOpts.HTTPProxy {
- for _, envSpec := range config.ProxyEnv {
- if envVal, ok := os.LookupEnv(envSpec); ok {
- g.AddProcessEnv(envSpec, envVal)
- }
- }
- }
-
- for _, envSpec := range util.MergeEnv(util.MergeEnv(defaultEnv, b.Env()), options.Env) {
- env := strings.SplitN(envSpec, "=", 2)
- if len(env) > 1 {
- g.AddProcessEnv(env[0], env[1])
- }
- }
-}
-
func addOrReplaceMount(mounts []specs.Mount, mount specs.Mount) []spec.Mount {
for i := range mounts {
if mounts[i].Destination == mount.Destination {
@@ -2231,367 +1160,6 @@ func checkIdsGreaterThan5(ids []spec.LinuxIDMapping) bool {
return false
}
-func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks,
- moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName, buildContainerName, hostsFile string) (err error) {
- var confwg sync.WaitGroup
- config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
- Options: options,
- Spec: spec,
- RootPath: rootPath,
- BundlePath: bundlePath,
- ConfigureNetwork: configureNetwork,
- MoreCreateArgs: moreCreateArgs,
- ContainerName: containerName,
- Isolation: isolation,
- })
- if conferr != nil {
- return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand)
- }
- cmd := reexec.Command(runUsingRuntimeCommand)
- setPdeathsig(cmd)
- cmd.Dir = bundlePath
- cmd.Stdin = options.Stdin
- if cmd.Stdin == nil {
- cmd.Stdin = os.Stdin
- }
- cmd.Stdout = options.Stdout
- if cmd.Stdout == nil {
- cmd.Stdout = os.Stdout
- }
- cmd.Stderr = options.Stderr
- if cmd.Stderr == nil {
- cmd.Stderr = os.Stderr
- }
- cmd.Env = util.MergeEnv(os.Environ(), []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())})
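-	// runUsingRuntimeMain() in the reexec'd child reads LOGLEVEL back to restore the log level.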
- preader, pwriter, err := os.Pipe()
- if err != nil {
- return errors.Wrapf(err, "error creating configuration pipe")
- }
- confwg.Add(1)
- go func() {
- _, conferr = io.Copy(pwriter, bytes.NewReader(config))
- if conferr != nil {
- conferr = errors.Wrapf(conferr, "error while copying configuration down pipe to child process")
- }
- confwg.Done()
- }()
-
- // create network configuration pipes
- var containerCreateR, containerCreateW fileCloser
- var containerStartR, containerStartW fileCloser
- if configureNetwork {
- containerCreateR.file, containerCreateW.file, err = os.Pipe()
- if err != nil {
- return errors.Wrapf(err, "error creating container create pipe")
- }
- defer containerCreateR.Close()
- defer containerCreateW.Close()
-
- containerStartR.file, containerStartW.file, err = os.Pipe()
- if err != nil {
-			return errors.Wrapf(err, "error creating container start pipe")
- }
- defer containerStartR.Close()
- defer containerStartW.Close()
- cmd.ExtraFiles = []*os.File{containerCreateW.file, containerStartR.file}
- }
-
- cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
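-	// The child sees preader as fd 3; with ConfigureNetwork, the create and start pipes follow as fds 4 and 5.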
- defer preader.Close()
- defer pwriter.Close()
- if err := cmd.Start(); err != nil {
- return errors.Wrapf(err, "error while starting runtime")
- }
-
- interrupted := make(chan os.Signal, 100)
- go func() {
- for receivedSignal := range interrupted {
- if err := cmd.Process.Signal(receivedSignal); err != nil {
- logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal)
- }
- }
- }()
- signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
-
- if configureNetwork {
- // we already passed the fd to the child, now close the writer so we do not hang if the child closes it
- containerCreateW.Close()
- if err := waitForSync(containerCreateR.file); err != nil {
- // we do not want to return here since we want to capture the exit code from the child via cmd.Wait()
- // close the pipes here so that the child will not hang forever
- containerCreateR.Close()
- containerStartW.Close()
- logrus.Errorf("did not get container create message from subprocess: %v", err)
- } else {
- pidFile := filepath.Join(bundlePath, "pid")
- pidValue, err := ioutil.ReadFile(pidFile)
- if err != nil {
- return err
- }
- pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue)))
- if err != nil {
- return errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue))
- }
-
- teardown, netstatus, err := b.runConfigureNetwork(pid, isolation, options, configureNetworks, containerName)
- if teardown != nil {
- defer teardown()
- }
- if err != nil {
- return err
- }
-
- // only add hosts if we manage the hosts file
- if hostsFile != "" {
- var entries etchosts.HostEntries
- if netstatus != nil {
- entries = etchosts.GetNetworkHostEntries(netstatus, spec.Hostname, buildContainerName)
- } else {
-					// we have slirp4netns, default to the slirp4netns IP since this is not configurable in buildah
- entries = etchosts.HostEntries{{IP: "10.0.2.100", Names: []string{spec.Hostname, buildContainerName}}}
- }
- // make sure to sync this with (b *Builder) generateHosts()
- err = etchosts.Add(hostsFile, entries)
- if err != nil {
- return err
- }
- }
-
- logrus.Debug("network namespace successfully setup, send start message to child")
- _, err = containerStartW.file.Write([]byte{1})
- if err != nil {
- return err
- }
- }
- }
-
- if err := cmd.Wait(); err != nil {
- return errors.Wrapf(err, "error while running runtime")
- }
- confwg.Wait()
- signal.Stop(interrupted)
- close(interrupted)
- if err == nil {
- return conferr
- }
- if conferr != nil {
- logrus.Debugf("%v", conferr)
- }
- return err
-}
-
-// fileCloser is a helper struct to prevent closing the file twice in the code
-// users must call (fileCloser).Close() and not fileCloser.file.Close()
-type fileCloser struct {
- file *os.File
- closed bool
-}
-
-func (f *fileCloser) Close() {
- if !f.closed {
- if err := f.file.Close(); err != nil {
- logrus.Errorf("failed to close file: %v", err)
- }
- f.closed = true
- }
-}
-
-// waitForSync waits for a maximum of 4 minutes to read something from the file
-func waitForSync(pipeR *os.File) error {
- if err := pipeR.SetDeadline(time.Now().Add(4 * time.Minute)); err != nil {
- return err
- }
- b := make([]byte, 16)
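-	// The read returns as soon as the child writes anything (or closes its end of the pipe).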
- _, err := pipeR.Read(b)
- return err
-}
-
-func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOptions) error {
- switch isolation {
- case IsolationOCIRootless:
- // only change the netns if the caller did not set it
- if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns == nil {
- if _, err := exec.LookPath("slirp4netns"); err != nil {
-				// if slirp4netns is not installed we have to use the host's net namespace
- options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.NetworkNamespace), Host: true})
- }
- }
- fallthrough
- case IsolationOCI:
- pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace))
- userns := options.NamespaceOptions.Find(string(specs.UserNamespace))
- if (pidns != nil && pidns.Host) && (userns != nil && !userns.Host) {
- return errors.Errorf("not allowed to mix host PID namespace with container user namespace")
- }
- }
- return nil
-}
-
-// DefaultNamespaceOptions returns the default namespace settings from the
-// runtime-tools generator library.
-func DefaultNamespaceOptions() (define.NamespaceOptions, error) {
- cfg, err := config.Default()
- if err != nil {
- return nil, errors.Wrapf(err, "failed to get container config")
- }
- options := define.NamespaceOptions{
- {Name: string(specs.CgroupNamespace), Host: cfg.CgroupNS() == "host"},
- {Name: string(specs.IPCNamespace), Host: cfg.IPCNS() == "host"},
- {Name: string(specs.MountNamespace), Host: false},
- {Name: string(specs.NetworkNamespace), Host: cfg.NetNS() == "host"},
- {Name: string(specs.PIDNamespace), Host: cfg.PidNS() == "host"},
- {Name: string(specs.UserNamespace), Host: cfg.Containers.UserNS == "host"},
- {Name: string(specs.UTSNamespace), Host: cfg.UTSNS() == "host"},
- }
- return options, nil
-}
-
-func contains(volumes []string, v string) bool {
- for _, i := range volumes {
- if i == v {
- return true
- }
- }
- return false
-}
-
-type runUsingRuntimeSubprocOptions struct {
- Options RunOptions
- Spec *specs.Spec
- RootPath string
- BundlePath string
- ConfigureNetwork bool
- MoreCreateArgs []string
- ContainerName string
- Isolation define.Isolation
-}
-
-func init() {
- reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain)
-}
-
-// runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs
-func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMaps IDMaps) ([]spec.Mount, *runMountArtifacts, error) {
- mountTargets := make([]string, 0, 10)
- tmpFiles := make([]string, 0, len(mounts))
- mountImages := make([]string, 0, 10)
- finalMounts := make([]specs.Mount, 0, len(mounts))
- agents := make([]*sshagent.AgentServer, 0, len(mounts))
- sshCount := 0
- defaultSSHSock := ""
- tokens := []string{}
- lockedTargets := []string{}
- for _, mount := range mounts {
- arr := strings.SplitN(mount, ",", 2)
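-		// arr[0] must be the "type=..." token; the remainder carries the per-type options.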
-
- kv := strings.Split(arr[0], "=")
- if len(kv) != 2 || kv[0] != "type" {
- return nil, nil, errors.New("invalid mount type")
- }
- if len(arr) == 2 {
- tokens = strings.Split(arr[1], ",")
- }
-
- switch kv[1] {
- case "secret":
- mount, envFile, err := b.getSecretMount(tokens, sources.Secrets, idMaps)
- if err != nil {
- return nil, nil, err
- }
- if mount != nil {
- finalMounts = append(finalMounts, *mount)
- mountTargets = append(mountTargets, mount.Destination)
- if envFile != "" {
- tmpFiles = append(tmpFiles, envFile)
- }
- }
- case "ssh":
- mount, agent, err := b.getSSHMount(tokens, sshCount, sources.SSHSources, idMaps)
- if err != nil {
- return nil, nil, err
- }
- if mount != nil {
- finalMounts = append(finalMounts, *mount)
- mountTargets = append(mountTargets, mount.Destination)
- agents = append(agents, agent)
- if sshCount == 0 {
- defaultSSHSock = mount.Destination
- }
- // Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i}
- sshCount++
- }
- case "bind":
- mount, image, err := b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps)
- if err != nil {
- return nil, nil, err
- }
- finalMounts = append(finalMounts, *mount)
- mountTargets = append(mountTargets, mount.Destination)
-			// only perform cleanup if an image was mounted; ignore everything else
- if image != "" {
- mountImages = append(mountImages, image)
- }
- case "tmpfs":
- mount, err := b.getTmpfsMount(tokens, idMaps)
- if err != nil {
- return nil, nil, err
- }
- finalMounts = append(finalMounts, *mount)
- mountTargets = append(mountTargets, mount.Destination)
- case "cache":
- mount, lockedPaths, err := b.getCacheMount(tokens, sources.StageMountPoints, idMaps)
- if err != nil {
- return nil, nil, err
- }
- finalMounts = append(finalMounts, *mount)
- mountTargets = append(mountTargets, mount.Destination)
- lockedTargets = lockedPaths
- default:
- return nil, nil, errors.Errorf("invalid mount type %q", kv[1])
- }
- }
- artifacts := &runMountArtifacts{
- RunMountTargets: mountTargets,
- TmpFiles: tmpFiles,
- Agents: agents,
- MountedImages: mountImages,
- SSHAuthSock: defaultSSHSock,
- LockedTargets: lockedTargets,
- }
- return finalMounts, artifacts, nil
-}
-
-func (b *Builder) getBindMount(tokens []string, context *imagetypes.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, string, error) {
- if contextDir == "" {
- return nil, "", errors.New("Context Directory for current run invocation is not configured")
- }
- var optionMounts []specs.Mount
- mount, image, err := internalParse.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints)
- if err != nil {
- return nil, image, err
- }
- optionMounts = append(optionMounts, mount)
- volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
- if err != nil {
- return nil, image, err
- }
- return &volumes[0], image, nil
-}
-
-func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*spec.Mount, error) {
- var optionMounts []specs.Mount
- mount, err := internalParse.GetTmpfsMount(tokens)
- if err != nil {
- return nil, err
- }
- optionMounts = append(optionMounts, mount)
- volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
- if err != nil {
- return nil, err
- }
- return &volumes[0], nil
-}
-
func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, []string, error) {
var optionMounts []specs.Mount
mount, lockedTargets, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints)
@@ -2606,336 +1174,6 @@ func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]int
return &volumes[0], lockedTargets, nil
}
-func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secret, idMaps IDMaps) (*spec.Mount, string, error) {
- errInvalidSyntax := errors.New("secret should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint")
- if len(tokens) == 0 {
- return nil, "", errInvalidSyntax
- }
- var err error
- var id, target string
- var required bool
- var uid, gid uint32
- var mode uint32 = 0400
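-	// The default mode is 0400; the "mode" option below is likewise parsed as octal.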
- for _, val := range tokens {
- kv := strings.SplitN(val, "=", 2)
- switch kv[0] {
- case "id":
- id = kv[1]
- case "target", "dst", "destination":
- target = kv[1]
- case "required":
- required, err = strconv.ParseBool(kv[1])
- if err != nil {
- return nil, "", errInvalidSyntax
- }
- case "mode":
- mode64, err := strconv.ParseUint(kv[1], 8, 32)
- if err != nil {
- return nil, "", errInvalidSyntax
- }
- mode = uint32(mode64)
- case "uid":
- uid64, err := strconv.ParseUint(kv[1], 10, 32)
- if err != nil {
- return nil, "", errInvalidSyntax
- }
- uid = uint32(uid64)
- case "gid":
- gid64, err := strconv.ParseUint(kv[1], 10, 32)
- if err != nil {
- return nil, "", errInvalidSyntax
- }
- gid = uint32(gid64)
- default:
- return nil, "", errInvalidSyntax
- }
- }
-
- if id == "" {
- return nil, "", errInvalidSyntax
- }
-	// Default location for secrets is /run/secrets/id
- if target == "" {
- target = "/run/secrets/" + id
- }
-
- secr, ok := secrets[id]
- if !ok {
- if required {
- return nil, "", errors.Errorf("secret required but no secret with id %s found", id)
- }
- return nil, "", nil
- }
- var data []byte
- var envFile string
- var ctrFileOnHost string
-
- switch secr.SourceType {
- case "env":
- data = []byte(os.Getenv(secr.Source))
- tmpFile, err := ioutil.TempFile("/dev/shm", "buildah*")
- if err != nil {
- return nil, "", err
- }
- envFile = tmpFile.Name()
- ctrFileOnHost = tmpFile.Name()
- case "file":
- containerWorkingDir, err := b.store.ContainerDirectory(b.ContainerID)
- if err != nil {
- return nil, "", err
- }
- data, err = ioutil.ReadFile(secr.Source)
- if err != nil {
- return nil, "", err
- }
- ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id)
- default:
- return nil, "", errors.New("invalid source secret type")
- }
-
- // Copy secrets to container working dir (or tmp dir if it's an env), since we need to chmod,
- // chown and relabel it for the container user and we don't want to mess with the original file
- if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil {
- return nil, "", err
- }
- if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil {
- return nil, "", err
- }
-
- if err := label.Relabel(ctrFileOnHost, b.MountLabel, false); err != nil {
- return nil, "", err
- }
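-	// Map the requested uid/gid to the host IDs backing the container's user namespace.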
- hostUID, hostGID, err := util.GetHostIDs(idMaps.uidmap, idMaps.gidmap, uid, gid)
- if err != nil {
- return nil, "", err
- }
- if err := os.Lchown(ctrFileOnHost, int(hostUID), int(hostGID)); err != nil {
- return nil, "", err
- }
- if err := os.Chmod(ctrFileOnHost, os.FileMode(mode)); err != nil {
- return nil, "", err
- }
- newMount := specs.Mount{
- Destination: target,
- Type: "bind",
- Source: ctrFileOnHost,
- Options: []string{"bind", "rprivate", "ro"},
- }
- return &newMount, envFile, nil
-}
-
-// getSSHMount parses the --mount type=ssh flag in the Containerfile, checks if there's an ssh source provided, and creates and starts an ssh-agent to be forwarded into the container
-func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, idMaps IDMaps) (*spec.Mount, *sshagent.AgentServer, error) {
- errInvalidSyntax := errors.New("ssh should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint")
-
- var err error
- var id, target string
- var required bool
- var uid, gid uint32
-	var mode uint32 = 0400
- for _, val := range tokens {
- kv := strings.SplitN(val, "=", 2)
- if len(kv) < 2 {
- return nil, nil, errInvalidSyntax
- }
- switch kv[0] {
- case "id":
- id = kv[1]
- case "target", "dst", "destination":
- target = kv[1]
- case "required":
- required, err = strconv.ParseBool(kv[1])
- if err != nil {
- return nil, nil, errInvalidSyntax
- }
- case "mode":
- mode64, err := strconv.ParseUint(kv[1], 8, 32)
- if err != nil {
- return nil, nil, errInvalidSyntax
- }
- mode = uint32(mode64)
- case "uid":
- uid64, err := strconv.ParseUint(kv[1], 10, 32)
- if err != nil {
- return nil, nil, errInvalidSyntax
- }
- uid = uint32(uid64)
- case "gid":
- gid64, err := strconv.ParseUint(kv[1], 10, 32)
- if err != nil {
- return nil, nil, errInvalidSyntax
- }
- gid = uint32(gid64)
- default:
- return nil, nil, errInvalidSyntax
- }
- }
-
- if id == "" {
- id = "default"
- }
-	// Default location for the ssh sock is /run/buildkit/ssh_agent.{i}
- if target == "" {
- target = fmt.Sprintf("/run/buildkit/ssh_agent.%d", count)
- }
-
- sshsource, ok := sshsources[id]
- if !ok {
- if required {
- return nil, nil, errors.Errorf("ssh required but no ssh with id %s found", id)
- }
- return nil, nil, nil
- }
- // Create new agent from keys or socket
- fwdAgent, err := sshagent.NewAgentServer(sshsource)
- if err != nil {
- return nil, nil, err
- }
- // Start ssh server, and get the host sock we're mounting in the container
- hostSock, err := fwdAgent.Serve(b.ProcessLabel)
- if err != nil {
- return nil, nil, err
- }
-
- if err := label.Relabel(filepath.Dir(hostSock), b.MountLabel, false); err != nil {
- if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
- b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
- }
- return nil, nil, err
- }
- if err := label.Relabel(hostSock, b.MountLabel, false); err != nil {
- if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
- b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
- }
- return nil, nil, err
- }
- hostUID, hostGID, err := util.GetHostIDs(idMaps.uidmap, idMaps.gidmap, uid, gid)
- if err != nil {
- if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
- b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
- }
- return nil, nil, err
- }
- if err := os.Lchown(hostSock, int(hostUID), int(hostGID)); err != nil {
- if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
- b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
- }
- return nil, nil, err
- }
- if err := os.Chmod(hostSock, os.FileMode(mode)); err != nil {
- if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
- b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
- }
- return nil, nil, err
- }
- newMount := specs.Mount{
- Destination: target,
- Type: "bind",
- Source: hostSock,
- Options: []string{"bind", "rprivate", "ro"},
- }
- return &newMount, fwdAgent, nil
-}
-
-// cleanupRunMounts cleans up run mounts so they only appear in this run.
-func (b *Builder) cleanupRunMounts(context *imagetypes.SystemContext, mountpoint string, artifacts *runMountArtifacts) error {
- for _, agent := range artifacts.Agents {
- err := agent.Shutdown()
- if err != nil {
- return err
- }
- }
-
-	// cleanup any mounted images for this run
-	for _, image := range artifacts.MountedImages {
-		if image == "" {
-			continue
-		}
-		// if we get here, some image was mounted for this run
-		i, err := internalUtil.LookupImage(context, b.store, image)
-		if err != nil {
-			if errors.Cause(err) == storagetypes.ErrImageUnknown {
-				// Ignore only if ErrImageUnknown:
-				// the image is already unmounted, nothing to do
-				continue
-			}
-			return err
-		}
-		// silently try to unmount and do nothing
-		// if image is being used by something else
-		_ = i.Unmount(false)
-	}
-
- opts := copier.RemoveOptions{
- All: true,
- }
- for _, path := range artifacts.RunMountTargets {
- err := copier.Remove(mountpoint, path, opts)
- if err != nil {
- return err
- }
- }
- var prevErr error
- for _, path := range artifacts.TmpFiles {
- err := os.Remove(path)
- if !os.IsNotExist(err) {
- if prevErr != nil {
- logrus.Error(prevErr)
- }
- prevErr = err
- }
- }
- // unlock if any locked files from this RUN statement
- for _, path := range artifacts.LockedTargets {
- _, err := os.Stat(path)
- if err != nil {
-			// Lockfile not found; this might be a problem,
-			// since LockedTargets must contain the list of all locked files.
-			// Don't break here, since we still need to unlock the other files,
-			// but log it so the user can take a look.
- logrus.Warnf("Lockfile %q was expected here, stat failed with %v", path, err)
- continue
- }
- lockfile, err := lockfile.GetLockfile(path)
- if err != nil {
-			// unable to get the lockfile;
-			// log the error and continue
-			// unlocking the other files
- logrus.Warn(err)
- continue
- }
- if lockfile.Locked() {
- lockfile.Unlock()
- } else {
- logrus.Warnf("Lockfile %q was expected to be locked, this is unexpected", path)
- continue
- }
- }
- return prevErr
-}
-
-// getNetworkInterface creates the network interface
-func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) {
- conf, err := config.Default()
- if err != nil {
- return nil, err
- }
- // copy the config to not modify the default by accident
- newconf := *conf
- if len(cniConfDir) > 0 {
- newconf.Network.NetworkConfigDir = cniConfDir
- }
- if len(cniPluginPath) > 0 {
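-		// cniPluginPath may list several directories joined by the platform's path list separator.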
- plugins := strings.Split(cniPluginPath, string(os.PathListSeparator))
- newconf.Network.CNIPluginDirs = plugins
- }
-
- _, netInt, err := network.NetworkBackend(store, &newconf, false)
- if err != nil {
- return nil, err
- }
- return netInt, nil
-}
-
// setPdeathsig sets a parent-death signal for the process
func setPdeathsig(cmd *exec.Cmd) {
if cmd.SysProcAttr == nil {
diff --git a/vendor/github.com/containers/buildah/run_unix.go b/vendor/github.com/containers/buildah/run_unix.go
index 280176dba..68a3dac24 100644
--- a/vendor/github.com/containers/buildah/run_unix.go
+++ b/vendor/github.com/containers/buildah/run_unix.go
@@ -1,13 +1,15 @@
+//go:build darwin
// +build darwin
package buildah
import (
+ "errors"
+
"github.com/containers/buildah/define"
nettypes "github.com/containers/common/libnetwork/types"
- "github.com/opencontainers/runtime-spec/specs-go"
"github.com/containers/storage"
- "github.com/pkg/errors"
+ "github.com/opencontainers/runtime-spec/specs-go"
)
// ContainerDevices is an alias for a slice of github.com/opencontainers/runc/libcontainer/configs.Device structures.
diff --git a/vendor/github.com/containers/buildah/run_unsupported.go b/vendor/github.com/containers/buildah/run_unsupported.go
index f0640ffe2..b135be7e5 100644
--- a/vendor/github.com/containers/buildah/run_unsupported.go
+++ b/vendor/github.com/containers/buildah/run_unsupported.go
@@ -1,11 +1,13 @@
-// +build !linux,!darwin
+//go:build !linux && !darwin && !freebsd
+// +build !linux,!darwin,!freebsd
package buildah
import (
+ "errors"
+
nettypes "github.com/containers/common/libnetwork/types"
"github.com/containers/storage"
- "github.com/pkg/errors"
)
func setChildProcess() error {
diff --git a/vendor/github.com/containers/buildah/seccomp.go b/vendor/github.com/containers/buildah/seccomp.go
index fc7811098..668123233 100644
--- a/vendor/github.com/containers/buildah/seccomp.go
+++ b/vendor/github.com/containers/buildah/seccomp.go
@@ -1,13 +1,14 @@
+//go:build seccomp && linux
// +build seccomp,linux
package buildah
import (
+ "fmt"
"io/ioutil"
"github.com/containers/common/pkg/seccomp"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
@@ -17,17 +18,17 @@ func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
case "":
seccompConfig, err := seccomp.GetDefaultProfile(spec)
if err != nil {
- return errors.Wrapf(err, "loading default seccomp profile failed")
+ return fmt.Errorf("loading default seccomp profile failed: %w", err)
}
spec.Linux.Seccomp = seccompConfig
default:
seccompProfile, err := ioutil.ReadFile(seccompProfilePath)
if err != nil {
- return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath)
+ return fmt.Errorf("opening seccomp profile (%s) failed: %w", seccompProfilePath, err)
}
seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
if err != nil {
- return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath)
+ return fmt.Errorf("loading seccomp profile (%s) failed: %w", seccompProfilePath, err)
}
spec.Linux.Seccomp = seccompConfig
}
diff --git a/vendor/github.com/containers/buildah/selinux.go b/vendor/github.com/containers/buildah/selinux.go
index 83fc867a2..b186cb5e9 100644
--- a/vendor/github.com/containers/buildah/selinux.go
+++ b/vendor/github.com/containers/buildah/selinux.go
@@ -9,7 +9,6 @@ import (
"github.com/opencontainers/runtime-tools/generate"
selinux "github.com/opencontainers/selinux/go-selinux"
- "github.com/pkg/errors"
)
func selinuxGetEnabled() bool {
@@ -30,12 +29,12 @@ func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) erro
}
pipeContext, err := selinux.ComputeCreateContext(processLabel, mountLabel, "fifo_file")
if err != nil {
- return errors.Wrapf(err, "computing file creation context for pipes")
+ return fmt.Errorf("computing file creation context for pipes: %w", err)
}
for i := range stdioPipe {
pipeFdName := fmt.Sprintf("/proc/self/fd/%d", stdioPipe[i][0])
if err := selinux.SetFileLabel(pipeFdName, pipeContext); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "setting file label on %q", pipeFdName)
+ return fmt.Errorf("setting file label on %q: %w", pipeFdName, err)
}
}
return nil
diff --git a/vendor/github.com/containers/buildah/unmount.go b/vendor/github.com/containers/buildah/unmount.go
index b86ad92fd..ae9726ee3 100644
--- a/vendor/github.com/containers/buildah/unmount.go
+++ b/vendor/github.com/containers/buildah/unmount.go
@@ -1,19 +1,17 @@
package buildah
-import (
- "github.com/pkg/errors"
-)
+import "fmt"
// Unmount unmounts a build container.
func (b *Builder) Unmount() error {
_, err := b.store.Unmount(b.ContainerID, false)
if err != nil {
- return errors.Wrapf(err, "error unmounting build container %q", b.ContainerID)
+ return fmt.Errorf("error unmounting build container %q: %w", b.ContainerID, err)
}
b.MountPoint = ""
err = b.Save()
if err != nil {
- return errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID)
+ return fmt.Errorf("error saving updated state for build container %q: %w", b.ContainerID, err)
}
return nil
}
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index 9bfa9d268..6ebd04a0c 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -1,6 +1,8 @@
package buildah
import (
+ "errors"
+ "fmt"
"io"
"os"
"path/filepath"
@@ -16,7 +18,6 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -107,7 +108,7 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa
func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) {
reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
if err != nil {
- return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistriesv2.ConfigPath(sc))
+ return false, fmt.Errorf("unable to parse the registries configuration (%s): %w", sysregistriesv2.ConfigPath(sc), err)
}
if reginfo != nil {
if reginfo.Blocked {
@@ -150,7 +151,7 @@ func ReserveSELinuxLabels(store storage.Store, id string) error {
if selinuxGetEnabled() {
containers, err := store.Containers()
if err != nil {
- return errors.Wrapf(err, "error getting list of containers")
+ return fmt.Errorf("error getting list of containers: %w", err)
}
for _, c := range containers {
@@ -159,7 +160,7 @@ func ReserveSELinuxLabels(store storage.Store, id string) error {
} else {
b, err := OpenBuilder(store, c.ID)
if err != nil {
- if os.IsNotExist(errors.Cause(err)) {
+ if errors.Is(err, os.ErrNotExist) {
// Ignore not exist errors since containers probably created by other tool
// TODO, we need to read other containers json data to reserve their SELinux labels
continue
@@ -168,7 +169,7 @@ func ReserveSELinuxLabels(store storage.Store, id string) error {
}
// Prevent different containers from using same MCS label
if err := label.ReserveLabel(b.ProcessLabel); err != nil {
- return errors.Wrapf(err, "error reserving SELinux label %q", b.ProcessLabel)
+ return fmt.Errorf("error reserving SELinux label %q: %w", b.ProcessLabel, err)
}
}
}
@@ -218,10 +219,10 @@ func extractWithTar(root, src, dest string) error {
wg.Wait()
if getErr != nil {
- return errors.Wrapf(getErr, "error reading %q", src)
+ return fmt.Errorf("error reading %q: %w", src, getErr)
}
if putErr != nil {
- return errors.Wrapf(putErr, "error copying contents of %q to %q", src, dest)
+ return fmt.Errorf("error copying contents of %q to %q: %w", src, dest, putErr)
}
return nil
}
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 986e1d9f7..ffebd3146 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -1,6 +1,7 @@
package util
import (
+ "errors"
"fmt"
"io"
"net/url"
@@ -24,7 +25,6 @@ import (
"github.com/docker/distribution/registry/api/errcode"
"github.com/opencontainers/go-digest"
specs "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -118,18 +118,18 @@ func ExpandNames(names []string, systemContext *types.SystemContext, store stora
var name reference.Named
nameList, _, err := resolveName(n, systemContext, store)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing name %q", n)
+ return nil, fmt.Errorf("error parsing name %q: %w", n, err)
}
if len(nameList) == 0 {
named, err := reference.ParseNormalizedNamed(n)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing name %q", n)
+ return nil, fmt.Errorf("error parsing name %q: %w", n, err)
}
name = named
} else {
named, err := reference.ParseNormalizedNamed(nameList[0])
if err != nil {
- return nil, errors.Wrapf(err, "error parsing name %q", nameList[0])
+ return nil, fmt.Errorf("error parsing name %q: %w", nameList[0], err)
}
name = named
}
@@ -169,7 +169,7 @@ func ResolveNameToReferences(
) (refs []types.ImageReference, err error) {
names, transport, err := resolveName(image, systemContext, store)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing name %q", image)
+ return nil, fmt.Errorf("error parsing name %q: %w", image, err)
}
if transport != DefaultTransport {
@@ -185,7 +185,7 @@ func ResolveNameToReferences(
refs = append(refs, ref)
}
if len(refs) == 0 {
- return nil, errors.Errorf("error locating images with names %v", names)
+ return nil, fmt.Errorf("error locating images with names %v", names)
}
return refs, nil
}
@@ -206,7 +206,7 @@ func AddImageNames(store storage.Store, firstRegistry string, systemContext *typ
for _, tag := range addNames {
if err := localImage.Tag(tag); err != nil {
- return errors.Wrapf(err, "error tagging image %s", image.ID)
+ return fmt.Errorf("error tagging image %s: %w", image.ID, err)
}
}
@@ -217,7 +217,7 @@ func AddImageNames(store storage.Store, firstRegistry string, systemContext *typ
 // error message that reflects the reason for the failure.
 // In case the err type is not a familiar one, the error "defaultError" is returned.
func GetFailureCause(err, defaultError error) error {
- switch nErr := errors.Cause(err).(type) {
+ switch nErr := err.(type) {
case errcode.Errors:
return err
case errcode.Error, *url.Error:
@@ -263,7 +263,7 @@ func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (ui
}
}
if !uidMapped {
- return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
+ return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
}
gidMapped := true
for _, m := range gidmap {
@@ -275,7 +275,7 @@ func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (ui
}
}
if !gidMapped {
- return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
+ return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
}
return uid, gid, nil
}
@@ -293,7 +293,7 @@ func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32,
}
}
if !uidMapped {
- return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
+ return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
}
gidMapped := true
for _, m := range gidmap {
@@ -305,7 +305,7 @@ func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32,
}
}
if !gidMapped {
- return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
+ return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
}
return uid, gid, nil
}
@@ -460,3 +460,22 @@ func VerifyTagName(imageSpec string) (types.ImageReference, error) {
}
return ref, nil
}
+
+// Cause returns the most underlying error for the provided one. There is a
+// maximum error depth of 100 to avoid endless loops. An additional error log
+// message will be created if this maximum has been reached.
+func Cause(err error) (cause error) {
+ cause = err
+
+ const maxDepth = 100
+ for i := 0; i <= maxDepth; i++ {
+ res := errors.Unwrap(cause)
+ if res == nil {
+ return cause
+ }
+ cause = res
+ }
+
+ logrus.Errorf("Max error depth of %d reached, cannot unwrap until root cause: %v", maxDepth, err)
+ return cause
+}
diff --git a/vendor/github.com/containers/buildah/util/util_uint64.go b/vendor/github.com/containers/buildah/util/util_uint64.go
index b0b922531..e404690e3 100644
--- a/vendor/github.com/containers/buildah/util/util_uint64.go
+++ b/vendor/github.com/containers/buildah/util/util_uint64.go
@@ -1,4 +1,5 @@
-// +build linux,!mips,!mipsle,!mips64,!mips64le
+//go:build (linux && !mips && !mipsle && !mips64 && !mips64le) || freebsd
+// +build linux,!mips,!mipsle,!mips64,!mips64le freebsd
package util
diff --git a/vendor/github.com/containers/buildah/util/util_unix.go b/vendor/github.com/containers/buildah/util/util_unix.go
index 29983e40f..8048e26a9 100644
--- a/vendor/github.com/containers/buildah/util/util_unix.go
+++ b/vendor/github.com/containers/buildah/util/util_unix.go
@@ -1,4 +1,5 @@
-// +build linux darwin
+//go:build linux || darwin || freebsd
+// +build linux darwin freebsd
package util
diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go
index 1cba29143..7570f2633 100644
--- a/vendor/github.com/containers/common/libimage/copier.go
+++ b/vendor/github.com/containers/common/libimage/copier.go
@@ -2,6 +2,8 @@ package libimage
import (
"context"
+ "errors"
+ "fmt"
"io"
"os"
"strings"
@@ -17,7 +19,6 @@ import (
storageTransport "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -343,12 +344,12 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere
// Sanity checks for Buildah.
if sourceInsecure != nil && *sourceInsecure {
if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
- return nil, errors.Errorf("can't require tls verification on an insecured registry")
+ return nil, fmt.Errorf("can't require tls verification on an insecured registry")
}
}
if destinationInsecure != nil && *destinationInsecure {
if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
- return nil, errors.Errorf("can't require tls verification on an insecured registry")
+ return nil, fmt.Errorf("can't require tls verification on an insecured registry")
}
}
@@ -402,7 +403,7 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err
AllowedRegistries []string `json:"allowedRegistries,omitempty"`
}
if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
- return nil, errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources)
+ return nil, fmt.Errorf("error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %w", registrySources, err)
}
blocked := false
if len(sources.BlockedRegistries) > 0 {
@@ -413,7 +414,7 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err
}
}
if blocked {
- return nil, errors.Errorf("registry %q denied by policy: it is in the blocked registries list (%s)", reference.Domain(dref), registrySources)
+ return nil, fmt.Errorf("registry %q denied by policy: it is in the blocked registries list (%s)", reference.Domain(dref), registrySources)
}
allowed := true
if len(sources.AllowedRegistries) > 0 {
@@ -425,7 +426,7 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err
}
}
if !allowed {
- return nil, errors.Errorf("registry %q denied by policy: not in allowed registries list (%s)", reference.Domain(dref), registrySources)
+ return nil, fmt.Errorf("registry %q denied by policy: not in allowed registries list (%s)", reference.Domain(dref), registrySources)
}
 	for _, insecureDomain := range sources.InsecureRegistries {
diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go
index f9f73f527..3968c5fdc 100644
--- a/vendor/github.com/containers/common/libimage/filters.go
+++ b/vendor/github.com/containers/common/libimage/filters.go
@@ -11,7 +11,6 @@ import (
filtersPkg "github.com/containers/common/pkg/filters"
"github.com/containers/common/pkg/timetype"
"github.com/containers/image/v5/docker/reference"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -102,7 +101,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp
} else {
split = strings.SplitN(f, "=", 2)
if len(split) != 2 {
- return nil, errors.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value or filter!=value")
+ return nil, fmt.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value or filter!=value")
}
}
@@ -186,7 +185,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp
filter = filterBefore(until)
default:
- return nil, errors.Errorf("unsupported image filter %q", key)
+ return nil, fmt.Errorf("unsupported image filter %q", key)
}
if negate {
filter = negateFilter(filter)
@@ -206,7 +205,7 @@ func negateFilter(f filterFunc) filterFunc {
func (r *Runtime) containers(duplicate map[string]string, key, value string, externalFunc IsExternalContainerFunc) error {
if exists, ok := duplicate[key]; ok && exists != value {
- return errors.Errorf("specifying %q filter more than once with different values is not supported", key)
+ return fmt.Errorf("specifying %q filter more than once with different values is not supported", key)
}
duplicate[key] = value
switch value {
@@ -237,19 +236,19 @@ func (r *Runtime) until(value string) (time.Time, error) {
func (r *Runtime) time(key, value string) (*Image, error) {
img, _, err := r.LookupImage(value, nil)
if err != nil {
- return nil, errors.Wrapf(err, "could not find local image for filter filter %q=%q", key, value)
+ return nil, fmt.Errorf("could not find local image for filter filter %q=%q: %w", key, value, err)
}
return img, nil
}
func (r *Runtime) bool(duplicate map[string]string, key, value string) (bool, error) {
if exists, ok := duplicate[key]; ok && exists != value {
- return false, errors.Errorf("specifying %q filter more than once with different values is not supported", key)
+ return false, fmt.Errorf("specifying %q filter more than once with different values is not supported", key)
}
duplicate[key] = value
set, err := strconv.ParseBool(value)
if err != nil {
- return false, errors.Wrapf(err, "non-boolean value %q for %s filter", key, value)
+ return false, fmt.Errorf("non-boolean value %q for %s filter: %w", key, value, err)
}
return set, nil
}
diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go
index d7c4fcd51..b1866fa9b 100644
--- a/vendor/github.com/containers/common/libimage/image.go
+++ b/vendor/github.com/containers/common/libimage/image.go
@@ -2,6 +2,7 @@ package libimage
import (
"context"
+ "errors"
"fmt"
"path/filepath"
"sort"
@@ -16,7 +17,6 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/opencontainers/go-digest"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -54,7 +54,7 @@ func (i *Image) reload() error {
logrus.Tracef("Reloading image %s", i.ID())
img, err := i.runtime.store.Image(i.ID())
if err != nil {
- return errors.Wrap(err, "reloading image")
+ return fmt.Errorf("reloading image: %w", err)
}
i.storageImage = img
i.cached.imageSource = nil
@@ -81,7 +81,7 @@ func (i *Image) isCorrupted(name string) error {
if name == "" {
name = i.ID()[:12]
}
- return errors.Errorf("Image %s exists in local storage but may be corrupted (remove the image to resolve the issue): %v", name, err)
+ return fmt.Errorf("Image %s exists in local storage but may be corrupted (remove the image to resolve the issue): %v", name, err)
}
return nil
}
@@ -195,7 +195,7 @@ func (i *Image) Labels(ctx context.Context) (map[string]string, error) {
if err != nil {
isManifestList, listErr := i.IsManifestList(ctx)
if listErr != nil {
- err = errors.Wrapf(err, "fallback error checking whether image is a manifest list: %v", err)
+ err = fmt.Errorf("fallback error checking whether image is a manifest list: %v: %w", err, err)
} else if isManifestList {
logrus.Debugf("Ignoring error: cannot return labels for manifest list or image index %s", i.ID())
return nil, nil
@@ -305,7 +305,7 @@ func (i *Image) removeContainers(options *RemoveImagesOptions) error {
for _, cID := range containers {
if err := i.runtime.store.DeleteContainer(cID); err != nil {
// If the container does not exist anymore, we're good.
- if errors.Cause(err) != storage.ErrContainerUnknown {
+ if !errors.Is(err, storage.ErrContainerUnknown) {
multiE = multierror.Append(multiE, err)
}
}
@@ -361,7 +361,7 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma
logrus.Debugf("Removing image %s", i.ID())
if i.IsReadOnly() {
- return processedIDs, errors.Errorf("cannot remove read-only image %q", i.ID())
+ return processedIDs, fmt.Errorf("cannot remove read-only image %q", i.ID())
}
if i.runtime.eventChannel != nil {
@@ -384,15 +384,12 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma
// have a closer look at the errors. On top, image removal should be
// tolerant toward corrupted images.
handleError := func(err error) error {
- switch errors.Cause(err) {
- case storage.ErrImageUnknown, storage.ErrNotAnImage, storage.ErrLayerUnknown:
- // The image or layers of the image may already
- // have been removed in which case we consider
- // the image to be removed.
+ if errors.Is(err, storage.ErrImageUnknown) || errors.Is(err, storage.ErrNotAnImage) || errors.Is(err, storage.ErrLayerUnknown) {
+ // The image or layers of the image may already have been removed
+ // in which case we consider the image to be removed.
return nil
- default:
- return err
}
+ return err
}
// Calculate the size if requested. `podman-image-prune` likes to
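
The handleError rewrite above shows the other half of the migration: a switch on errors.Cause compares only the root error and understands only chains built by github.com/pkg/errors, while errors.Is walks any chain built with %w. A standalone sketch with stand-in sentinels (names hypothetical, mirroring storage.ErrImageUnknown and friends):

    package main

    import (
        "errors"
        "fmt"
    )

    var (
        errImageUnknown = errors.New("image not known")
        errLayerUnknown = errors.New("layer not known")
    )

    // handleRemoveError mirrors handleError above: "already gone"
    // errors count as success, anything else propagates.
    func handleRemoveError(err error) error {
        if errors.Is(err, errImageUnknown) || errors.Is(err, errLayerUnknown) {
            return nil
        }
        return err
    }

    func main() {
        wrapped := fmt.Errorf("removing layer: %w", errLayerUnknown)
        fmt.Println(handleRemoveError(wrapped))                  // <nil>
        fmt.Println(handleRemoveError(errors.New("disk error"))) // disk error
    }
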
@@ -421,11 +418,11 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma
byDigest := strings.HasPrefix(referencedBy, "sha256:")
if !options.Force {
if byID && numNames > 1 {
- return processedIDs, errors.Errorf("unable to delete image %q by ID with more than one tag (%s): please force removal", i.ID(), i.Names())
+ return processedIDs, fmt.Errorf("unable to delete image %q by ID with more than one tag (%s): please force removal", i.ID(), i.Names())
} else if byDigest && numNames > 1 {
// FIXME - Docker will remove the digest but containers storage
// does not support that yet, so our hands are tied.
- return processedIDs, errors.Errorf("unable to delete image %q by digest with more than one tag (%s): please force removal", i.ID(), i.Names())
+ return processedIDs, fmt.Errorf("unable to delete image %q by digest with more than one tag (%s): please force removal", i.ID(), i.Names())
}
}
@@ -509,16 +506,16 @@ var errTagDigest = errors.New("tag by digest not supported")
// storage. The name is normalized according to the rules of NormalizeName.
func (i *Image) Tag(name string) error {
if strings.HasPrefix(name, "sha256:") { // ambiguous input
- return errors.Wrap(errTagDigest, name)
+ return fmt.Errorf("%s: %w", name, errTagDigest)
}
ref, err := NormalizeName(name)
if err != nil {
- return errors.Wrapf(err, "normalizing name %q", name)
+ return fmt.Errorf("normalizing name %q: %w", name, err)
}
if _, isDigested := ref.(reference.Digested); isDigested {
- return errors.Wrap(errTagDigest, name)
+ return fmt.Errorf("%s: %w", name, errTagDigest)
}
logrus.Debugf("Tagging image %s with %q", i.ID(), ref.String())
@@ -546,12 +543,12 @@ var errUntagDigest = errors.New("untag by digest not supported")
// of NormalizeName.
func (i *Image) Untag(name string) error {
if strings.HasPrefix(name, "sha256:") { // ambiguous input
- return errors.Wrap(errUntagDigest, name)
+ return fmt.Errorf("%s: %w", name, errUntagDigest)
}
ref, err := NormalizeName(name)
if err != nil {
- return errors.Wrapf(err, "normalizing name %q", name)
+ return fmt.Errorf("normalizing name %q: %w", name, err)
}
// FIXME: this is breaking Podman CI but must be re-enabled once
@@ -560,9 +557,9 @@ func (i *Image) Untag(name string) error {
//
// !!! Also make sure to re-enable the tests !!!
//
- // if _, isDigested := ref.(reference.Digested); isDigested {
- // return errors.Wrap(errUntagDigest, name)
- // }
+ // if _, isDigested := ref.(reference.Digested); isDigested {
+ // return fmt.Errorf("%s: %w", name, errUntagDigest)
+ // }
name = ref.String()
@@ -582,7 +579,7 @@ func (i *Image) Untag(name string) error {
}
if !removedName {
- return errors.Wrap(errTagUnknown, name)
+ return fmt.Errorf("%s: %w", name, errTagUnknown)
}
if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil {
@@ -731,7 +728,7 @@ func (i *Image) Mount(ctx context.Context, mountOptions []string, mountLabel str
func (i *Image) Mountpoint() (string, error) {
mountedTimes, err := i.runtime.store.Mounted(i.TopLayer())
if err != nil || mountedTimes == 0 {
- if errors.Cause(err) == storage.ErrLayerUnknown {
+ if errors.Is(err, storage.ErrLayerUnknown) {
// Can happen, Podman did it, but there's no
// explanation why.
err = nil
@@ -943,7 +940,7 @@ func getImageID(ctx context.Context, src types.ImageReference, sys *types.System
}()
imageDigest := newImg.ConfigInfo().Digest
if err = imageDigest.Validate(); err != nil {
- return "", errors.Wrapf(err, "getting config info")
+ return "", fmt.Errorf("getting config info: %w", err)
}
return "@" + imageDigest.Encoded(), nil
}
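
One subtlety worth noting: isCorrupted above formats its cause with %v rather than %w. %v flattens the cause into text, while %w keeps it reachable by errors.Is and errors.As, so the verb choice decides whether callers can still detect the underlying error:

    package main

    import (
        "errors"
        "fmt"
    )

    var errCause = errors.New("layer checksum mismatch")

    func main() {
        opaque := fmt.Errorf("image may be corrupted: %v", errCause)
        wrapped := fmt.Errorf("image may be corrupted: %w", errCause)

        fmt.Println(errors.Is(opaque, errCause))  // false: %v breaks the chain
        fmt.Println(errors.Is(wrapped, errCause)) // true: %w preserves it
    }
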
diff --git a/vendor/github.com/containers/common/libimage/image_config.go b/vendor/github.com/containers/common/libimage/image_config.go
index 683a2dc98..b311aa22e 100644
--- a/vendor/github.com/containers/common/libimage/image_config.go
+++ b/vendor/github.com/containers/common/libimage/image_config.go
@@ -8,7 +8,6 @@ import (
"github.com/containers/common/pkg/signal"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported
@@ -44,7 +43,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
if len(split) != 2 {
split = strings.SplitN(change, "=", 2)
if len(split) != 2 {
- return nil, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change)
+ return nil, fmt.Errorf("invalid change %q - must be formatted as KEY VALUE", change)
}
}
@@ -54,7 +53,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
case "USER":
// Assume literal contents are the user.
if value == "" {
- return nil, errors.Errorf("invalid change %q - must provide a value to USER", change)
+ return nil, fmt.Errorf("invalid change %q - must provide a value to USER", change)
}
config.User = value
case "EXPOSE":
@@ -63,14 +62,14 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
// Protocol must be "tcp" or "udp"
splitPort := strings.Split(value, "/")
if len(splitPort) > 2 {
- return nil, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change)
+ return nil, fmt.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change)
}
portNum, err := strconv.Atoi(splitPort[0])
if err != nil {
- return nil, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change)
+ return nil, fmt.Errorf("invalid change %q - EXPOSE port must be an integer: %w", change, err)
}
if portNum > 65535 || portNum <= 0 {
- return nil, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change)
+ return nil, fmt.Errorf("invalid change %q - EXPOSE port must be a valid port number", change)
}
proto := "tcp"
if len(splitPort) > 1 {
@@ -79,7 +78,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
case "tcp", "udp":
proto = testProto
default:
- return nil, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change)
+ return nil, fmt.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change)
}
}
if config.ExposedPorts == nil {
@@ -101,7 +100,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
key = splitEnv[0]
// We do need a key
if key == "" {
- return nil, errors.Errorf("invalid change %q - ENV must have at least one argument", change)
+ return nil, fmt.Errorf("invalid change %q - ENV must have at least one argument", change)
}
// Perfectly valid to not have a value
if len(splitEnv) == 2 {
@@ -163,11 +162,11 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
testUnmarshal = strings.Split(value, " ")
}
if len(testUnmarshal) == 0 {
- return nil, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
+ return nil, fmt.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
}
for _, vol := range testUnmarshal {
if vol == "" {
- return nil, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change)
+ return nil, fmt.Errorf("invalid change %q - VOLUME paths must not be empty", change)
}
if config.Volumes == nil {
config.Volumes = make(map[string]struct{})
@@ -181,7 +180,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
// WORKDIR c results in /A/b/c
// Just need to check it's not empty...
if value == "" {
- return nil, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
+ return nil, fmt.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
}
config.WorkingDir = filepath.Join(config.WorkingDir, value)
case "LABEL":
@@ -198,7 +197,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
splitLabel := strings.SplitN(value, "=", 2)
// Unlike ENV, LABEL must have a value
if len(splitLabel) != 2 {
- return nil, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change)
+ return nil, fmt.Errorf("invalid change %q - LABEL must be formatted key=value", change)
}
key = splitLabel[0]
val = splitLabel[1]
@@ -211,7 +210,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
}
// Check key after we strip quotations
if key == "" {
- return nil, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change)
+ return nil, fmt.Errorf("invalid change %q - LABEL must have a non-empty key", change)
}
if config.Labels == nil {
config.Labels = make(map[string]string)
@@ -221,17 +220,17 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
// Check the provided signal for validity.
killSignal, err := signal.ParseSignal(value)
if err != nil {
- return nil, errors.Wrapf(err, "invalid change %q - KILLSIGNAL must be given a valid signal", change)
+ return nil, fmt.Errorf("invalid change %q - KILLSIGNAL must be given a valid signal: %w", change, err)
}
config.StopSignal = fmt.Sprintf("%d", killSignal)
case "ONBUILD":
// Onbuild always appends.
if value == "" {
- return nil, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change)
+ return nil, fmt.Errorf("invalid change %q - ONBUILD must be given an argument", change)
}
config.OnBuild = append(config.OnBuild, value)
default:
- return nil, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
+ return nil, fmt.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
}
}
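
For orientation, ImageConfigFromChanges parses docker-commit style "KEY VALUE" or "KEY=VALUE" instructions into an OCI image configuration. A hedged usage sketch; the import path comes from this vendor tree, and direct field access assumes the wrapper embeds the OCI config as the code above suggests:

    import "github.com/containers/common/libimage"

    func configFromChanges() error {
        cfg, err := libimage.ImageConfigFromChanges([]string{
            "USER=nginx",
            "EXPOSE 8080/tcp",
            "ENV DEBUG=1",
            "LABEL maintainer=someone@example.com",
        })
        if err != nil {
            return err // e.g. invalid change ... - EXPOSE port must be an integer
        }
        _ = cfg.ExposedPorts // contains key "8080/tcp"
        return nil
    }
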
diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go
index 3db392784..f557db626 100644
--- a/vendor/github.com/containers/common/libimage/import.go
+++ b/vendor/github.com/containers/common/libimage/import.go
@@ -2,6 +2,7 @@ package libimage
import (
"context"
+ "errors"
"fmt"
"net/url"
"os"
@@ -10,7 +11,6 @@ import (
storageTransport "github.com/containers/image/v5/storage"
tarballTransport "github.com/containers/image/v5/tarball"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -117,7 +117,7 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption
if options.Tag != "" {
image, _, err := r.LookupImage(name, nil)
if err != nil {
- return "", errors.Wrap(err, "looking up imported image")
+ return "", fmt.Errorf("looking up imported image: %w", err)
}
if err := image.Tag(options.Tag); err != nil {
return "", err
diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go
index 4e8959004..4480df548 100644
--- a/vendor/github.com/containers/common/libimage/manifest_list.go
+++ b/vendor/github.com/containers/common/libimage/manifest_list.go
@@ -2,6 +2,7 @@ package libimage
import (
"context"
+ "errors"
"fmt"
"time"
@@ -13,7 +14,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// NOTE: the abstractions and APIs here are a first step to further merge
@@ -145,7 +145,7 @@ func (m *ManifestList) LookupInstance(ctx context.Context, architecture, os, var
}
}
- return nil, errors.Wrapf(storage.ErrImageUnknown, "could not find image instance %s of manifest list %s in local containers storage", instanceDigest, m.ID())
+ return nil, fmt.Errorf("could not find image instance %s of manifest list %s in local containers storage: %w", instanceDigest, m.ID(), storage.ErrImageUnknown)
}
// Saves the specified manifest list and reloads it from storage with the new ID.
@@ -169,6 +169,21 @@ func (m *ManifestList) saveAndReload() error {
return nil
}
+// Reload the image and list instances from storage
+func (m *ManifestList) reload() error {
+ listID := m.ID()
+ if err := m.image.reload(); err != nil {
+ return err
+ }
+ image, list, err := m.image.runtime.lookupManifestList(listID)
+ if err != nil {
+ return err
+ }
+ m.image = image
+ m.list = list
+ return nil
+}
+
// getManifestList is a helper to obtain a manifest list
func (i *Image) getManifestList() (manifests.List, error) {
_, list, err := manifests.LoadFromImage(i.runtime.store, i.ID())
@@ -253,7 +268,17 @@ func (m *ManifestList) Add(ctx context.Context, name string, options *ManifestLi
Password: options.Password,
}
}
-
+ locker, err := manifests.LockerForImage(m.image.runtime.store, m.ID())
+ if err != nil {
+ return "", err
+ }
+ locker.Lock()
+ defer locker.Unlock()
+ // Make sure to reload the image from the containers storage to fetch
+ // the latest data (e.g., new or delete digests).
+ if err := m.reload(); err != nil {
+ return "", err
+ }
newDigest, err := m.list.Add(ctx, systemContext, ref, options.All)
if err != nil {
return "", err
diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go
index 2624dee78..8404da9c5 100644
--- a/vendor/github.com/containers/common/libimage/manifests/manifests.go
+++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go
@@ -3,7 +3,9 @@ package manifests
import (
"context"
"encoding/json"
+ "errors"
stderrors "errors"
+ "fmt"
"io"
"github.com/containers/common/pkg/manifests"
@@ -21,7 +23,6 @@ import (
"github.com/containers/storage/pkg/lockfile"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -83,11 +84,11 @@ func Create() List {
func LoadFromImage(store storage.Store, image string) (string, List, error) {
img, err := store.Image(image)
if err != nil {
- return "", nil, errors.Wrapf(err, "error locating image %q for loading manifest list", image)
+ return "", nil, fmt.Errorf("error locating image %q for loading manifest list: %w", image, err)
}
manifestBytes, err := store.ImageBigData(img.ID, storage.ImageDigestManifestBigDataNamePrefix)
if err != nil {
- return "", nil, errors.Wrapf(err, "error locating image %q for loading manifest list", image)
+ return "", nil, fmt.Errorf("error locating image %q for loading manifest list: %w", image, err)
}
manifestList, err := manifests.FromBlob(manifestBytes)
if err != nil {
@@ -99,10 +100,10 @@ func LoadFromImage(store storage.Store, image string) (string, List, error) {
}
instancesBytes, err := store.ImageBigData(img.ID, instancesData)
if err != nil {
- return "", nil, errors.Wrapf(err, "error locating image %q for loading instance list", image)
+ return "", nil, fmt.Errorf("error locating image %q for loading instance list: %w", image, err)
}
if err := json.Unmarshal(instancesBytes, &list.instances); err != nil {
- return "", nil, errors.Wrapf(err, "error decoding instance list for image %q", image)
+ return "", nil, fmt.Errorf("error decoding instance list for image %q: %w", image, err)
}
list.instances[""] = img.ID
return img.ID, list, err
@@ -122,7 +123,7 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string,
return "", err
}
img, err := store.CreateImage(imageID, names, "", "", &storage.ImageOptions{})
- if err == nil || errors.Cause(err) == storage.ErrDuplicateID {
+ if err == nil || errors.Is(err, storage.ErrDuplicateID) {
created := (err == nil)
if created {
imageID = img.ID
@@ -135,7 +136,7 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string,
logrus.Errorf("Deleting image %q after failing to save manifest for it", img.ID)
}
}
- return "", errors.Wrapf(err, "saving manifest list to image %q", imageID)
+ return "", fmt.Errorf("saving manifest list to image %q: %w", imageID, err)
}
err = store.SetImageBigData(imageID, instancesData, instancesBytes, nil)
if err != nil {
@@ -144,22 +145,22 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string,
logrus.Errorf("Deleting image %q after failing to save instance locations for it", img.ID)
}
}
- return "", errors.Wrapf(err, "saving instance list to image %q", imageID)
+ return "", fmt.Errorf("saving instance list to image %q: %w", imageID, err)
}
return imageID, nil
}
- return "", errors.Wrapf(err, "error creating image to hold manifest list")
+ return "", fmt.Errorf("error creating image to hold manifest list: %w", err)
}
// Reference returns an image reference for the composite image being built
// in the list, or an error if the list has never been saved to a local image.
func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error) {
if l.instances[""] == "" {
- return nil, errors.Wrap(ErrListImageUnknown, "error building reference to list")
+ return nil, fmt.Errorf("error building reference to list: %w", ErrListImageUnknown)
}
s, err := is.Transport.ParseStoreReference(store, l.instances[""])
if err != nil {
- return nil, errors.Wrapf(err, "error creating ImageReference from image %q", l.instances[""])
+ return nil, fmt.Errorf("error creating ImageReference from image %q: %w", l.instances[""], err)
}
references := make([]types.ImageReference, 0, len(l.instances))
whichInstances := make([]digest.Digest, 0, len(l.instances))
@@ -183,7 +184,7 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in
imageName := l.instances[instance]
ref, err := alltransports.ParseImageName(imageName)
if err != nil {
- return nil, errors.Wrapf(err, "error creating ImageReference from image %q", imageName)
+ return nil, fmt.Errorf("error creating ImageReference from image %q: %w", imageName, err)
}
references = append(references, ref)
}
@@ -195,7 +196,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push
// Load the system signing policy.
pushPolicy, err := signature.DefaultPolicy(options.SystemContext)
if err != nil {
- return nil, "", errors.Wrapf(err, "error obtaining default signature policy")
+ return nil, "", fmt.Errorf("error obtaining default signature policy: %w", err)
}
// Override the settings for local storage to make sure that we can always read the source "image".
@@ -203,7 +204,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push
policyContext, err := signature.NewPolicyContext(pushPolicy)
if err != nil {
- return nil, "", errors.Wrapf(err, "error creating new signature policy context")
+ return nil, "", fmt.Errorf("error creating new signature policy context: %w", err)
}
defer func() {
if err2 := policyContext.Destroy(); err2 != nil {
@@ -266,7 +267,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push
func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.ImageReference, all bool) (digest.Digest, error) {
src, err := ref.NewImageSource(ctx, sys)
if err != nil {
- return "", errors.Wrapf(err, "error setting up to read manifest and configuration from %q", transports.ImageName(ref))
+ return "", fmt.Errorf("error setting up to read manifest and configuration from %q: %w", transports.ImageName(ref), err)
}
defer src.Close()
@@ -281,13 +282,13 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
primaryManifestBytes, primaryManifestType, err := src.GetManifest(ctx, nil)
if err != nil {
- return "", errors.Wrapf(err, "error reading manifest from %q", transports.ImageName(ref))
+ return "", fmt.Errorf("error reading manifest from %q: %w", transports.ImageName(ref), err)
}
if manifest.MIMETypeIsMultiImage(primaryManifestType) {
lists, err := manifests.FromBlob(primaryManifestBytes)
if err != nil {
- return "", errors.Wrapf(err, "error parsing manifest list in %q", transports.ImageName(ref))
+ return "", fmt.Errorf("error parsing manifest list in %q: %w", transports.ImageName(ref), err)
}
if all {
for i, instance := range lists.OCIv1().Manifests {
@@ -311,11 +312,11 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
} else {
list, err := manifest.ListFromBlob(primaryManifestBytes, primaryManifestType)
if err != nil {
- return "", errors.Wrapf(err, "error parsing manifest list in %q", transports.ImageName(ref))
+ return "", fmt.Errorf("error parsing manifest list in %q: %w", transports.ImageName(ref), err)
}
instanceDigest, err := list.ChooseInstance(sys)
if err != nil {
- return "", errors.Wrapf(err, "error selecting image from manifest list in %q", transports.ImageName(ref))
+ return "", fmt.Errorf("error selecting image from manifest list in %q: %w", transports.ImageName(ref), err)
}
added := false
for i, instance := range lists.OCIv1().Manifests {
@@ -357,11 +358,11 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
if instanceInfo.OS == "" || instanceInfo.Architecture == "" {
img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, instanceInfo.instanceDigest))
if err != nil {
- return "", errors.Wrapf(err, "error reading configuration blob from %q", transports.ImageName(ref))
+ return "", fmt.Errorf("error reading configuration blob from %q: %w", transports.ImageName(ref), err)
}
config, err := img.OCIConfig(ctx)
if err != nil {
- return "", errors.Wrapf(err, "error reading info about config blob from %q", transports.ImageName(ref))
+ return "", fmt.Errorf("error reading info about config blob from %q: %w", transports.ImageName(ref), err)
}
if instanceInfo.OS == "" {
instanceInfo.OS = config.OS
@@ -375,12 +376,12 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
}
manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest)
if err != nil {
- return "", errors.Wrapf(err, "error reading manifest from %q, instance %q", transports.ImageName(ref), instanceInfo.instanceDigest)
+ return "", fmt.Errorf("error reading manifest from %q, instance %q: %w", transports.ImageName(ref), instanceInfo.instanceDigest, err)
}
if instanceInfo.instanceDigest == nil {
manifestDigest, err = manifest.Digest(manifestBytes)
if err != nil {
- return "", errors.Wrapf(err, "error computing digest of manifest from %q", transports.ImageName(ref))
+ return "", fmt.Errorf("error computing digest of manifest from %q: %w", transports.ImageName(ref), err)
}
instanceInfo.instanceDigest = &manifestDigest
instanceInfo.Size = int64(len(manifestBytes))
@@ -389,7 +390,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
}
err = l.List.AddInstance(*instanceInfo.instanceDigest, instanceInfo.Size, manifestType, instanceInfo.OS, instanceInfo.Architecture, instanceInfo.OSVersion, instanceInfo.OSFeatures, instanceInfo.Variant, instanceInfo.Features, instanceInfo.Annotations)
if err != nil {
- return "", errors.Wrapf(err, "error adding instance with digest %q", *instanceInfo.instanceDigest)
+ return "", fmt.Errorf("error adding instance with digest %q: %w", *instanceInfo.instanceDigest, err)
}
if _, ok := l.instances[*instanceInfo.instanceDigest]; !ok {
l.instances[*instanceInfo.instanceDigest] = transports.ImageName(ref)
@@ -416,11 +417,11 @@ func (l *list) Remove(instanceDigest digest.Digest) error {
func LockerForImage(store storage.Store, image string) (lockfile.Locker, error) {
img, err := store.Image(image)
if err != nil {
- return nil, errors.Wrapf(err, "locating image %q for locating lock", image)
+ return nil, fmt.Errorf("locating image %q for locating lock: %w", image, err)
}
d := digest.NewDigestFromEncoded(digest.Canonical, img.ID)
if err := d.Validate(); err != nil {
- return nil, errors.Wrapf(err, "coercing image ID for %q into a digest", image)
+ return nil, fmt.Errorf("coercing image ID for %q into a digest: %w", image, err)
}
return store.GetDigestLock(d)
}
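
LockerForImage keys its lock by coercing the image's hex ID into a canonical digest, so every process asking for the lock of the same image lands on the same lock file. The coercion step in isolation:

    package main

    import (
        "fmt"

        digest "github.com/opencontainers/go-digest"
    )

    func main() {
        // A containers/storage image ID is a bare sha256 hex string;
        // wrapping it as a digest lets it key a digest-based lock.
        id := "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
        d := digest.NewDigestFromEncoded(digest.Canonical, id)
        if err := d.Validate(); err != nil {
            panic(fmt.Errorf("coercing image ID into a digest: %w", err))
        }
        fmt.Println(d) // sha256:0123456789abcdef...
    }
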
diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go
index 7af125283..be2d30206 100644
--- a/vendor/github.com/containers/common/libimage/normalize.go
+++ b/vendor/github.com/containers/common/libimage/normalize.go
@@ -1,10 +1,10 @@
package libimage
import (
+ "fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -18,12 +18,12 @@ func NormalizeName(name string) (reference.Named, error) {
// NOTE: this code is in symmetry with containers/image/pkg/shortnames.
ref, err := reference.Parse(name)
if err != nil {
- return nil, errors.Wrapf(err, "error normalizing name %q", name)
+ return nil, fmt.Errorf("error normalizing name %q: %w", name, err)
}
named, ok := ref.(reference.Named)
if !ok {
- return nil, errors.Errorf("%q is not a named reference", name)
+ return nil, fmt.Errorf("%q is not a named reference", name)
}
// Enforce "localhost" if needed.
diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go
index 2071cceca..86c9ebef1 100644
--- a/vendor/github.com/containers/common/libimage/pull.go
+++ b/vendor/github.com/containers/common/libimage/pull.go
@@ -2,6 +2,7 @@ package libimage
import (
"context"
+ "errors"
"fmt"
"io"
"runtime"
@@ -23,7 +24,6 @@ import (
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
ociSpec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -74,7 +74,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
// In fact, we need to since they are not parseable.
if strings.HasPrefix(name, "sha256:") || (len(name) == 64 && !strings.ContainsAny(name, "/.:@")) {
if pullPolicy == config.PullPolicyAlways {
- return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", name)
+ return nil, fmt.Errorf("pull policy is always but image has been referred to by ID (%s)", name)
}
local, _, err := r.LookupImage(name, nil)
if err != nil {
@@ -113,7 +113,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
}
if options.AllTags && ref.Transport().Name() != registryTransport.Transport.Name() {
- return nil, errors.Errorf("pulling all tags is not supported for %s transport", ref.Transport().Name())
+ return nil, fmt.Errorf("pulling all tags is not supported for %s transport", ref.Transport().Name())
}
if r.eventChannel != nil {
@@ -163,7 +163,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
for _, name := range pulledImages {
image, _, err := r.LookupImage(name, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error locating pulled image %q name in containers storage", name)
+ return nil, fmt.Errorf("error locating pulled image %q name in containers storage: %w", name, err)
}
// Note that we can ignore the 2nd return value here. Some
@@ -258,7 +258,7 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference,
storageName = ref.StringWithinTransport()
named := ref.DockerReference()
if named == nil {
- return nil, errors.Errorf("could not get an image name for storage reference %q", ref)
+ return nil, fmt.Errorf("could not get an image name for storage reference %q", ref)
}
imageName = named.String()
@@ -276,7 +276,7 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference,
// Create a storage reference.
destRef, err := storageTransport.Transport.ParseStoreReference(r.store, storageName)
if err != nil {
- return nil, errors.Wrapf(err, "parsing %q", storageName)
+ return nil, fmt.Errorf("parsing %q: %w", storageName, err)
}
_, err = c.copy(ctx, ref, destRef)
@@ -318,7 +318,7 @@ func (r *Runtime) storageReferencesReferencesFromArchiveReader(ctx context.Conte
for _, destName := range destNames {
destRef, err := storageTransport.Transport.ParseStoreReference(r.store, destName)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error parsing dest reference name %#v", destName)
+ return nil, nil, fmt.Errorf("error parsing dest reference name %#v: %w", destName, err)
}
references = append(references, destRef)
}
@@ -393,13 +393,13 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference
for _, tag := range tags {
select { // Let's be gentle with Podman remote.
case <-ctx.Done():
- return nil, errors.Errorf("pulling cancelled")
+ return nil, fmt.Errorf("pulling cancelled")
default:
// We can continue.
}
tagged, err := reference.WithTag(named, tag)
if err != nil {
- return nil, errors.Wrapf(err, "error creating tagged reference (name %s, tag %s)", named.String(), tag)
+ return nil, fmt.Errorf("error creating tagged reference (name %s, tag %s): %w", named.String(), tag, err)
}
pulled, err := r.copySingleImageFromRegistry(ctx, tagged.String(), pullPolicy, options)
if err != nil {
@@ -423,30 +423,30 @@ func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemCo
if manifest.MIMETypeIsMultiImage(manifestType) {
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
- return nil, errors.Wrapf(err, "parsing manifest list")
+ return nil, fmt.Errorf("parsing manifest list: %w", err)
}
d, err := list.ChooseInstance(sys)
if err != nil {
- return nil, errors.Wrapf(err, "choosing instance from manifest list")
+ return nil, fmt.Errorf("choosing instance from manifest list: %w", err)
}
imageDigest = d
} else {
d, err := manifest.Digest(manifestBytes)
if err != nil {
- return nil, errors.Wrapf(err, "digesting manifest")
+ return nil, fmt.Errorf("digesting manifest")
}
imageDigest = d
}
images, err := r.store.ImagesByDigest(imageDigest)
if err != nil {
- return nil, errors.Wrapf(err, "listing images by manifest digest")
+ return nil, fmt.Errorf("listing images by manifest digest: %w", err)
}
results := make([]string, 0, len(images))
for _, image := range images {
results = append(results, image.ID)
}
if len(results) == 0 {
- return nil, errors.Wrapf(storage.ErrImageUnknown, "identifying new image by manifest digest")
+ return nil, fmt.Errorf("identifying new image by manifest digest: %w", storage.ErrImageUnknown)
}
return results, nil
}
@@ -483,7 +483,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
lookupImageOptions.OS = options.OS
}
localImage, resolvedImageName, err = r.LookupImage(imageName, lookupImageOptions)
- if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
+ if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
logrus.Errorf("Looking up %s in local storage: %v", imageName, err)
}
@@ -515,7 +515,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
return []string{resolvedImageName}, nil
}
logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName)
- return nil, errors.Wrap(storage.ErrImageUnknown, imageName)
+ return nil, fmt.Errorf("%s: %w", imageName, storage.ErrImageUnknown)
}
if pullPolicy == config.PullPolicyMissing && localImage != nil {
@@ -526,7 +526,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
if localImage != nil && strings.HasPrefix(localImage.ID(), imageName) {
switch pullPolicy {
case config.PullPolicyAlways:
- return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", imageName)
+ return nil, fmt.Errorf("pull policy is always but image has been referred to by ID (%s)", imageName)
default:
return []string{resolvedImageName}, nil
}
@@ -648,7 +648,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
}
if len(pullErrors) == 0 {
- return nil, errors.Errorf("internal error: no image pulled (pull policy %s)", pullPolicy)
+ return nil, fmt.Errorf("internal error: no image pulled (pull policy %s)", pullPolicy)
}
return nil, resolved.FormatPullErrors(pullErrors)
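
The ID short-circuit at the top of Pull rests on a simple heuristic: a full 64-character hex string containing none of the name separators can only be an image ID, never a repository reference. Isolated for clarity:

    package main

    import (
        "fmt"
        "strings"
    )

    // looksLikeImageID mirrors the check in Pull: an explicit "sha256:"
    // prefix, or a bare 64-char string free of registry/tag/digest
    // separators.
    func looksLikeImageID(name string) bool {
        return strings.HasPrefix(name, "sha256:") ||
            (len(name) == 64 && !strings.ContainsAny(name, "/.:@"))
    }

    func main() {
        fmt.Println(looksLikeImageID("sha256:deadbeef"))      // true
        fmt.Println(looksLikeImageID("quay.io/podman/hello")) // false
    }
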
diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go
index 7e975b81d..6030a179b 100644
--- a/vendor/github.com/containers/common/libimage/runtime.go
+++ b/vendor/github.com/containers/common/libimage/runtime.go
@@ -2,6 +2,7 @@ package libimage
import (
"context"
+ "errors"
"fmt"
"os"
"strings"
@@ -15,7 +16,6 @@ import (
"github.com/containers/storage"
deepcopy "github.com/jinzhu/copier"
jsoniter "github.com/json-iterator/go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -160,7 +160,7 @@ func (r *Runtime) storageToImage(storageImage *storage.Image, ref types.ImageRef
// storage. Note that it may return false if an image is corrupted.
func (r *Runtime) Exists(name string) (bool, error) {
image, _, err := r.LookupImage(name, nil)
- if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
+ if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
return false, err
}
if image == nil {
@@ -227,7 +227,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
storageRef, err := alltransports.ParseImageName(name)
if err == nil {
if storageRef.Transport().Name() != storageTransport.Transport.Name() {
- return nil, "", errors.Errorf("unsupported transport %q for looking up local images", storageRef.Transport().Name())
+ return nil, "", fmt.Errorf("unsupported transport %q for looking up local images", storageRef.Transport().Name())
}
img, err := storageTransport.Transport.GetStoreImage(r.store, storageRef)
if err != nil {
@@ -266,7 +266,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
if img != nil {
return img, originalName, nil
}
- return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName)
+ return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown)
}
// Unless specified, set the platform specified in the system context
@@ -288,7 +288,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
// "localhost/" prefixed images into account as well.
candidates, err := shortnames.ResolveLocally(&r.systemContext, name)
if err != nil {
- return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
+ return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
}
// Backwards compat: normalize to docker.io as some users may very well
// rely on that.
@@ -324,7 +324,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *LookupImageOptions) (*Image, error) {
logrus.Debugf("Trying %q ...", candidate)
img, err := r.store.Image(candidate)
- if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
+ if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
return nil, err
}
if img == nil {
@@ -342,7 +342,7 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo
// find a matching instance in the local containers storage.
isManifestList, err := image.IsManifestList(context.Background())
if err != nil {
- if errors.Cause(err) == os.ErrNotExist {
+ if errors.Is(err, os.ErrNotExist) {
// We must be tolerant toward corrupted images.
// See containers/podman commit fd9dd7065d44.
logrus.Warnf("Failed to determine if an image is a manifest list: %v, ignoring the error", err)
@@ -356,7 +356,7 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo
}
// return ErrNotAManifestList if lookupManifest is set otherwise try resolving image.
if options.lookupManifest {
- return nil, errors.Wrapf(ErrNotAManifestList, candidate)
+ return nil, fmt.Errorf("%s: %w", candidate, ErrNotAManifestList)
}
}
@@ -372,7 +372,7 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo
logrus.Debug("No matching instance was found: returning manifest list instead")
return image, nil
}
- return nil, errors.Wrap(storage.ErrImageUnknown, err.Error())
+ return nil, fmt.Errorf("%v: %w", err, storage.ErrImageUnknown)
}
ref, err = storageTransport.Transport.ParseStoreReference(r.store, "@"+instance.ID())
if err != nil {
@@ -434,7 +434,7 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupIm
}
named, isNamed := ref.(reference.Named)
if !isNamed {
- return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
+ return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
}
digested, isDigested := named.(reference.Digested)
@@ -454,11 +454,11 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupIm
}
}
- return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
+ return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
}
if !shortnames.IsShortName(name) {
- return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
+ return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
}
named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
@@ -486,7 +486,7 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupIm
}
}
- return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
+ return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
}
// ResolveName resolves the specified name. If the name resolves to a local
@@ -499,7 +499,7 @@ func (r *Runtime) ResolveName(name string) (string, error) {
return "", nil
}
image, resolvedName, err := r.LookupImage(name, nil)
- if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
+ if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
return "", err
}
@@ -713,7 +713,7 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem
for _, id := range toDelete {
del, exists := deleteMap[id]
if !exists {
- appendError(errors.Errorf("internal error: ID %s not in found in image-deletion map", id))
+ appendError(fmt.Errorf("internal error: ID %s not in found in image-deletion map", id))
continue
}
if len(del.referencedBy) == 0 {
diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go
index fed86d4ef..a42bbb497 100644
--- a/vendor/github.com/containers/common/libimage/save.go
+++ b/vendor/github.com/containers/common/libimage/save.go
@@ -2,6 +2,8 @@ package libimage
import (
"context"
+ "errors"
+ "fmt"
"strings"
"time"
@@ -13,7 +15,6 @@ import (
ociTransport "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/types"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -47,10 +48,10 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string,
// All formats support saving 1.
default:
if format != "docker-archive" {
- return errors.Errorf("unsupported format %q for saving multiple images (only docker-archive)", format)
+ return fmt.Errorf("unsupported format %q for saving multiple images (only docker-archive)", format)
}
if len(options.AdditionalTags) > 0 {
- return errors.Errorf("cannot save multiple images with multiple tags")
+ return fmt.Errorf("cannot save multiple images with multiple tags")
}
}
@@ -58,7 +59,7 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string,
switch format {
case "oci-archive", "oci-dir", "docker-dir":
if len(names) > 1 {
- return errors.Errorf("%q does not support saving multiple images (%v)", format, names)
+ return fmt.Errorf("%q does not support saving multiple images (%v)", format, names)
}
return r.saveSingleImage(ctx, names[0], format, path, options)
@@ -67,7 +68,7 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string,
return r.saveDockerArchive(ctx, names, path, options)
}
- return errors.Errorf("unsupported format %q for saving images", format)
+ return fmt.Errorf("unsupported format %q for saving images", format)
}
// saveSingleImage saves the specified image name to the specified path.
@@ -109,7 +110,7 @@ func (r *Runtime) saveSingleImage(ctx context.Context, name, format, path string
options.ManifestMIMEType = manifest.DockerV2Schema2MediaType
default:
- return errors.Errorf("unsupported format %q for saving images", format)
+ return fmt.Errorf("unsupported format %q for saving images", format)
}
if err != nil {
@@ -143,7 +144,7 @@ func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path st
if err == nil {
tagged, withTag := named.(reference.NamedTagged)
if !withTag {
- return errors.Errorf("invalid additional tag %q: normalized to untagged %q", tag, named.String())
+ return fmt.Errorf("invalid additional tag %q: normalized to untagged %q", tag, named.String())
}
additionalTags = append(additionalTags, tagged)
}
@@ -195,7 +196,7 @@ func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path st
for _, id := range orderedIDs {
local, exists := localImages[id]
if !exists {
- return errors.Errorf("internal error: saveDockerArchive: ID %s not found in local map", id)
+ return fmt.Errorf("internal error: saveDockerArchive: ID %s not found in local map", id)
}
copyOpts := options.CopyOptions
diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go
index 204bcc8c7..0b58055b4 100644
--- a/vendor/github.com/containers/common/libimage/search.go
+++ b/vendor/github.com/containers/common/libimage/search.go
@@ -13,7 +13,6 @@ import (
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/hashicorp/go-multierror"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/semaphore"
)
@@ -84,11 +83,11 @@ func ParseSearchFilter(filter []string) (*SearchFilter, error) {
switch arr[0] {
case define.SearchFilterStars:
if len(arr) < 2 {
- return nil, errors.Errorf("invalid filter %q, should be stars=<value>", filter)
+ return nil, fmt.Errorf("invalid filter %q, should be stars=<value>", filter)
}
stars, err := strconv.Atoi(arr[1])
if err != nil {
- return nil, errors.Wrapf(err, "incorrect value type for stars filter")
+ return nil, fmt.Errorf("incorrect value type for stars filter: %w", err)
}
sFilter.Stars = stars
case define.SearchFilterAutomated:
@@ -104,7 +103,7 @@ func ParseSearchFilter(filter []string) (*SearchFilter, error) {
sFilter.IsOfficial = types.OptionalBoolTrue
}
default:
- return nil, errors.Errorf("invalid filter type %q", f)
+ return nil, fmt.Errorf("invalid filter type %q", f)
}
}
return sFilter, nil
@@ -273,16 +272,16 @@ func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registr
dockerPrefix := "docker://"
imageRef, err := alltransports.ParseImageName(fmt.Sprintf("%s/%s", registry, term))
if err == nil && imageRef.Transport().Name() != registryTransport.Transport.Name() {
- return nil, errors.Errorf("reference %q must be a docker reference", term)
+ return nil, fmt.Errorf("reference %q must be a docker reference", term)
} else if err != nil {
imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", dockerPrefix, fmt.Sprintf("%s/%s", registry, term)))
if err != nil {
- return nil, errors.Errorf("reference %q must be a docker reference", term)
+ return nil, fmt.Errorf("reference %q must be a docker reference", term)
}
}
tags, err := registryTransport.GetRepositoryTags(ctx, sys, imageRef)
if err != nil {
- return nil, errors.Errorf("error getting repository tags: %v", err)
+ return nil, fmt.Errorf("error getting repository tags: %v", err)
}
limit := searchMaxQueries
if len(tags) < limit {
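
ParseSearchFilter accepts the same key=value strings that podman search --filter forwards. A hedged usage sketch; the import path comes from this vendor tree and the field names from the code above:

    import "github.com/containers/common/libimage"

    func searchFilter() error {
        f, err := libimage.ParseSearchFilter([]string{"stars=10", "is-official=true"})
        if err != nil {
            return err
        }
        _ = f.Stars // 10
        return nil
    }
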
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
index 96a2a9a4a..de6adbdc7 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
@@ -5,6 +5,8 @@ package cni
import (
"encoding/json"
+ "errors"
+ "fmt"
"io/ioutil"
"net"
"os"
@@ -18,7 +20,6 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/libnetwork/util"
pkgutil "github.com/containers/common/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -35,7 +36,7 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
cniJSON := make(map[string]interface{})
err := json.Unmarshal(conf.Bytes, &cniJSON)
if err != nil {
- return nil, errors.Wrapf(err, "failed to unmarshal network config %s", conf.Name)
+ return nil, fmt.Errorf("failed to unmarshal network config %s: %w", conf.Name, err)
}
if args, ok := cniJSON["args"]; ok {
if key, ok := args.(map[string]interface{}); ok {
@@ -59,7 +60,7 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
var bridge hostLocalBridge
err := json.Unmarshal(firstPlugin.Bytes, &bridge)
if err != nil {
- return nil, errors.Wrapf(err, "failed to unmarshal the bridge plugin config in %s", confPath)
+ return nil, fmt.Errorf("failed to unmarshal the bridge plugin config in %s: %w", confPath, err)
}
network.NetworkInterface = bridge.BrName
@@ -70,10 +71,10 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
// set network options
if bridge.MTU != 0 {
- network.Options["mtu"] = strconv.Itoa(bridge.MTU)
+ network.Options[types.MTUOption] = strconv.Itoa(bridge.MTU)
}
if bridge.Vlan != 0 {
- network.Options["vlan"] = strconv.Itoa(bridge.Vlan)
+ network.Options[types.VLANOption] = strconv.Itoa(bridge.Vlan)
}
err = convertIPAMConfToNetwork(&network, &bridge.IPAM, confPath)
@@ -85,17 +86,17 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
var vlan VLANConfig
err := json.Unmarshal(firstPlugin.Bytes, &vlan)
if err != nil {
- return nil, errors.Wrapf(err, "failed to unmarshal the macvlan plugin config in %s", confPath)
+ return nil, fmt.Errorf("failed to unmarshal the macvlan plugin config in %s: %w", confPath, err)
}
network.NetworkInterface = vlan.Master
// set network options
if vlan.MTU != 0 {
- network.Options["mtu"] = strconv.Itoa(vlan.MTU)
+ network.Options[types.MTUOption] = strconv.Itoa(vlan.MTU)
}
if vlan.Mode != "" {
- network.Options["mode"] = vlan.Mode
+ network.Options[types.ModeOption] = vlan.Mode
}
err = convertIPAMConfToNetwork(&network, &vlan.IPAM, confPath)
@@ -110,18 +111,31 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
}
// check if the dnsname plugin is configured
- network.DNSEnabled = findPluginByName(conf.Plugins, "dnsname")
+ network.DNSEnabled = findPluginByName(conf.Plugins, "dnsname") != nil
+
+ // now get isolation mode from firewall plugin
+ firewall := findPluginByName(conf.Plugins, "firewall")
+ if firewall != nil {
+ var firewallConf firewallConfig
+ err := json.Unmarshal(firewall.Bytes, &firewallConf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal the firewall plugin config in %s: %w", confPath, err)
+ }
+ if firewallConf.IngressPolicy == ingressPolicySameBridge {
+ network.Options[types.IsolateOption] = "true"
+ }
+ }
return &network, nil
}
-func findPluginByName(plugins []*libcni.NetworkConfig, name string) bool {
- for _, plugin := range plugins {
- if plugin.Network.Type == name {
- return true
+func findPluginByName(plugins []*libcni.NetworkConfig, name string) *libcni.NetworkConfig {
+ for i := range plugins {
+ if plugins[i].Network.Type == name {
+ return plugins[i]
}
}
- return false
+ return nil
}
// convertIPAMConfToNetwork converts a CNI IPAMConfig to libpod network subnets.
@@ -151,7 +165,7 @@ func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath
if ipam.Gateway != "" {
gateway = net.ParseIP(ipam.Gateway)
if gateway == nil {
- return errors.Errorf("failed to parse gateway ip %s", ipam.Gateway)
+ return fmt.Errorf("failed to parse gateway ip %s", ipam.Gateway)
}
// convert to 4 byte if ipv4
util.NormalizeIP(&gateway)
@@ -159,7 +173,7 @@ func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath
// only add a gateway address if the network is not internal
gateway, err = util.FirstIPInSubnet(sub)
if err != nil {
- return errors.Errorf("failed to get first ip in subnet %s", sub.String())
+ return fmt.Errorf("failed to get first ip in subnet %s", sub.String())
}
}
s.Gateway = gateway
@@ -169,13 +183,13 @@ func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath
if ipam.RangeStart != "" {
rangeStart = net.ParseIP(ipam.RangeStart)
if rangeStart == nil {
- return errors.Errorf("failed to parse range start ip %s", ipam.RangeStart)
+ return fmt.Errorf("failed to parse range start ip %s", ipam.RangeStart)
}
}
if ipam.RangeEnd != "" {
rangeEnd = net.ParseIP(ipam.RangeEnd)
if rangeEnd == nil {
- return errors.Errorf("failed to parse range end ip %s", ipam.RangeEnd)
+ return fmt.Errorf("failed to parse range end ip %s", ipam.RangeEnd)
}
}
if rangeStart != nil || rangeEnd != nil {
@@ -267,7 +281,7 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
case types.NoneIPAMDriver:
// do nothing
default:
- return nil, "", errors.Errorf("unsupported ipam driver %q", ipamDriver)
+ return nil, "", fmt.Errorf("unsupported ipam driver %q", ipamDriver)
}
opts, err := parseOptions(network.Options, network.Driver)
@@ -291,7 +305,7 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
switch network.Driver {
case types.BridgeNetworkDriver:
bridge := newHostLocalBridge(network.NetworkInterface, isGateway, ipMasq, opts.mtu, opts.vlan, ipamConf)
- plugins = append(plugins, bridge, newPortMapPlugin(), newFirewallPlugin(), newTuningPlugin())
+ plugins = append(plugins, bridge, newPortMapPlugin(), newFirewallPlugin(opts.isolate), newTuningPlugin())
// if we find the dnsname plugin we add configuration for it
if hasDNSNamePlugin(n.cniPluginDirs) && network.DNSEnabled {
// Note: in the future we might like to allow for dynamic domain names
@@ -305,7 +319,7 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
plugins = append(plugins, newVLANPlugin(types.IPVLANNetworkDriver, network.NetworkInterface, opts.vlanPluginMode, opts.mtu, ipamConf))
default:
- return nil, "", errors.Errorf("driver %q is not supported by cni", network.Driver)
+ return nil, "", fmt.Errorf("driver %q is not supported by cni", network.Driver)
}
ncList["plugins"] = plugins
b, err := json.MarshalIndent(ncList, "", " ")
@@ -344,7 +358,7 @@ func convertSpecgenPortsToCNIPorts(ports []types.PortMapping) ([]cniPortMapEntry
for _, protocol := range protocols {
if !pkgutil.StringInSlice(protocol, []string{"tcp", "udp", "sctp"}) {
- return nil, errors.Errorf("unknown port protocol %s", protocol)
+ return nil, fmt.Errorf("unknown port protocol %s", protocol)
}
cniPort := cniPortMapEntry{
HostPort: int(port.HostPort),
@@ -382,6 +396,7 @@ type options struct {
vlan int
mtu int
vlanPluginMode string
+ isolate bool
}
func parseOptions(networkOptions map[string]string, networkDriver string) (*options, error) {
@@ -389,35 +404,44 @@ func parseOptions(networkOptions map[string]string, networkDriver string) (*opti
var err error
for k, v := range networkOptions {
switch k {
- case "mtu":
+ case types.MTUOption:
opt.mtu, err = internalutil.ParseMTU(v)
if err != nil {
return nil, err
}
- case "vlan":
+ case types.VLANOption:
opt.vlan, err = internalutil.ParseVlan(v)
if err != nil {
return nil, err
}
- case "mode":
+ case types.ModeOption:
switch networkDriver {
case types.MacVLANNetworkDriver:
if !pkgutil.StringInSlice(v, types.ValidMacVLANModes) {
- return nil, errors.Errorf("unknown macvlan mode %q", v)
+ return nil, fmt.Errorf("unknown macvlan mode %q", v)
}
case types.IPVLANNetworkDriver:
if !pkgutil.StringInSlice(v, types.ValidIPVLANModes) {
- return nil, errors.Errorf("unknown ipvlan mode %q", v)
+ return nil, fmt.Errorf("unknown ipvlan mode %q", v)
}
default:
- return nil, errors.Errorf("cannot set option \"mode\" with driver %q", networkDriver)
+ return nil, fmt.Errorf("cannot set option \"mode\" with driver %q", networkDriver)
}
opt.vlanPluginMode = v
+ case types.IsolateOption:
+ if networkDriver != types.BridgeNetworkDriver {
+ return nil, errors.New("isolate option is only supported with the bridge driver")
+ }
+ opt.isolate, err = strconv.ParseBool(v)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse isolate option: %w", err)
+ }
+
default:
- return nil, errors.Errorf("unsupported network option %s", k)
+ return nil, fmt.Errorf("unsupported network option %s", k)
}
}
return opt, nil
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go
index a407a8dea..cfc5b33bd 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go
@@ -26,6 +26,9 @@ const (
// podmanOptionsKey key used to store the podman network options in a cni config
podmanOptionsKey = "podman_options"
+
+ // ingressPolicySameBridge is used to only allow connection on the same bridge network
+ ingressPolicySameBridge = "same-bridge"
)
// cniPortMapEntry struct is used by the portmap plugin
@@ -95,8 +98,9 @@ type VLANConfig struct {
// firewallConfig describes the firewall plugin
type firewallConfig struct {
- PluginType string `json:"type"`
- Backend string `json:"backend"`
+ PluginType string `json:"type"`
+ Backend string `json:"backend"`
+ IngressPolicy string `json:"ingressPolicy,omitempty"`
}
// tuningConfig describes the tuning plugin
@@ -222,10 +226,14 @@ func newPortMapPlugin() portMapConfig {
}
// newFirewallPlugin creates a generic firewall plugin
-func newFirewallPlugin() firewallConfig {
- return firewallConfig{
+func newFirewallPlugin(isolate bool) firewallConfig {
+ fw := firewallConfig{
PluginType: "firewall",
}
+ if isolate {
+ fw.IngressPolicy = ingressPolicySameBridge
+ }
+ return fw
}
// newTuningPlugin creates a generic tuning section
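
With the isolate option set, the generated firewall plugin entry gains the same-bridge ingress policy shown above. A standalone sketch of what newFirewallPlugin(true) marshals to, using a trimmed local copy of the vendored struct:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Trimmed local copy of firewallConfig, for illustration only.
    type firewallConfig struct {
        PluginType    string `json:"type"`
        IngressPolicy string `json:"ingressPolicy,omitempty"`
    }

    func main() {
        fw := firewallConfig{PluginType: "firewall"}
        isolate := true // e.g. network option isolate=true
        if isolate {
            fw.IngressPolicy = "same-bridge"
        }
        b, _ := json.Marshal(fw)
        fmt.Println(string(b)) // {"type":"firewall","ingressPolicy":"same-bridge"}
    }
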
diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go
index c86196c17..aa94e73d1 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/config.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/config.go
@@ -4,13 +4,14 @@
package cni
import (
+ "errors"
+ "fmt"
"net"
"os"
internalutil "github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
pkgutil "github.com/containers/common/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -43,7 +44,7 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (
// FIXME: Should we use a different type for network create without the ID field?
// the caller is not allowed to set a specific ID
if newNetwork.ID != "" {
- return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create")
+ return nil, fmt.Errorf("ID can not be set for network create: %w", types.ErrInvalidArg)
}
err := internalutil.CommonNetworkCreate(n, newNetwork)
@@ -83,7 +84,7 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (
return nil, err
}
default:
- return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver)
+ return nil, fmt.Errorf("unsupported driver %s: %w", newNetwork.Driver, types.ErrInvalidArg)
}
err = internalutil.ValidateSubnets(newNetwork, !newNetwork.Internal, usedNetworks)
@@ -127,7 +128,7 @@ func (n *cniNetwork) NetworkRemove(nameOrID string) error {
// Removing the default network is not allowed.
if network.libpodNet.Name == n.defaultNetwork {
- return errors.Errorf("default network %s cannot be removed", n.defaultNetwork)
+ return fmt.Errorf("default network %s cannot be removed", n.defaultNetwork)
}
// Remove the bridge network interface on the host.
@@ -193,7 +194,7 @@ func createIPMACVLAN(network *types.Network) error {
return err
}
if !pkgutil.StringInSlice(network.NetworkInterface, interfaceNames) {
- return errors.Errorf("parent interface %s does not exist", network.NetworkInterface)
+ return fmt.Errorf("parent interface %s does not exist", network.NetworkInterface)
}
}
@@ -224,10 +225,10 @@ func validateIPAMDriver(n *types.Network) error {
case "", types.HostLocalIPAMDriver:
case types.DHCPIPAMDriver, types.NoneIPAMDriver:
if len(n.Subnets) > 0 {
- return errors.Errorf("%s ipam driver is set but subnets are given", ipamDriver)
+ return fmt.Errorf("%s ipam driver is set but subnets are given", ipamDriver)
}
default:
- return errors.Errorf("unsupported ipam driver %q", ipamDriver)
+ return fmt.Errorf("unsupported ipam driver %q", ipamDriver)
}
return nil
}
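The errors.Wrap/Errorf replacements in this file all rely on the same stdlib property: fmt.Errorf with the %w verb produces an error that errors.Is can unwrap back to the sentinel. A minimal sketch, with a local ErrInvalidArg standing in for types.ErrInvalidArg:

package main

import (
	"errors"
	"fmt"
)

// ErrInvalidArg plays the role of types.ErrInvalidArg from this diff.
var ErrInvalidArg = errors.New("invalid argument")

func networkCreate(id string) error {
	if id != "" {
		// same wrapping style as the hunks above
		return fmt.Errorf("ID can not be set for network create: %w", ErrInvalidArg)
	}
	return nil
}

func main() {
	err := networkCreate("abc123")
	fmt.Println(errors.Is(err, ErrInvalidArg)) // true: %w keeps the sentinel matchable
}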
diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go
index 561f309d0..fce8f0066 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/network.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/network.go
@@ -7,6 +7,8 @@ import (
"context"
"crypto/sha256"
"encoding/hex"
+ "errors"
+ "fmt"
"os"
"path/filepath"
"strings"
@@ -16,7 +18,6 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
"github.com/containers/storage/pkg/lockfile"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -94,7 +95,7 @@ func NewCNINetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
}
defaultNet, err := types.ParseCIDR(defaultSubnet)
if err != nil {
- return nil, errors.Wrap(err, "failed to parse default subnet")
+ return nil, fmt.Errorf("failed to parse default subnet: %w", err)
}
defaultSubnetPools := conf.DefaultsubnetPools
@@ -201,7 +202,7 @@ func (n *cniNetwork) loadNetworks() error {
if networks[n.defaultNetwork] == nil {
networkInfo, err := n.createDefaultNetwork()
if err != nil {
- return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork)
+ return fmt.Errorf("failed to create default network %s: %w", n.defaultNetwork, err)
}
networks[n.defaultNetwork] = networkInfo
}
@@ -243,7 +244,7 @@ func (n *cniNetwork) getNetwork(nameOrID string) (*network, error) {
if strings.HasPrefix(val.libpodNet.ID, nameOrID) {
if net != nil {
- return nil, errors.Errorf("more than one result for network ID %s", nameOrID)
+ return nil, fmt.Errorf("more than one result for network ID %s", nameOrID)
}
net = val
}
@@ -251,7 +252,7 @@ func (n *cniNetwork) getNetwork(nameOrID string) (*network, error) {
if net != nil {
return net, nil
}
- return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID)
+ return nil, fmt.Errorf("unable to find network with name or ID %s: %w", nameOrID, types.ErrNoSuchNetwork)
}
// getNetworkIDFromName creates a network ID from the name. It is just the
diff --git a/vendor/github.com/containers/common/libnetwork/cni/run.go b/vendor/github.com/containers/common/libnetwork/cni/run.go
index 35236cf25..2da8da1ad 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/run.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/run.go
@@ -5,6 +5,7 @@ package cni
import (
"context"
+ "fmt"
"net"
"os"
"strings"
@@ -15,7 +16,6 @@ import (
"github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
"github.com/hashicorp/go-multierror"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -36,7 +36,7 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma
err = setupLoopback(namespacePath)
if err != nil {
- return nil, errors.Wrapf(err, "failed to set the loopback adapter up")
+ return nil, fmt.Errorf("failed to set the loopback adapter up: %w", err)
}
var retErr error
@@ -108,7 +108,7 @@ func CNIResultToStatus(res cnitypes.Result) (types.StatusBlock, error) {
for _, nameserver := range cniResult.DNS.Nameservers {
ip := net.ParseIP(nameserver)
if ip == nil {
- return result, errors.Errorf("failed to parse cni nameserver ip %s", nameserver)
+ return result, fmt.Errorf("failed to parse cni nameserver ip %s", nameserver)
}
nameservers = append(nameservers, ip)
}
@@ -133,7 +133,7 @@ func CNIResultToStatus(res cnitypes.Result) (types.StatusBlock, error) {
continue
}
if len(cniResult.Interfaces) <= *ip.Interface {
- return result, errors.Errorf("invalid cni result, interface index %d out of range", *ip.Interface)
+ return result, fmt.Errorf("invalid cni result, interface index %d out of range", *ip.Interface)
}
// when we have an ip for this interface add it to the subnets
@@ -236,7 +236,7 @@ func (n *cniNetwork) teardown(namespacePath string, options types.TeardownOption
logrus.Warnf("Failed to load cached network config: %v, falling back to loading network %s from disk", err, name)
network := n.networks[name]
if network == nil {
- multiErr = multierror.Append(multiErr, errors.Wrapf(types.ErrNoSuchNetwork, "network %s", name))
+ multiErr = multierror.Append(multiErr, fmt.Errorf("network %s: %w", name, types.ErrNoSuchNetwork))
continue
}
cniConfList = network.cniNet
@@ -258,7 +258,7 @@ func getCachedNetworkConfig(cniConf *libcni.CNIConfig, name string, rt *libcni.R
if err != nil {
return nil, nil, err
} else if confBytes == nil {
- return nil, nil, errors.Errorf("network %s not found in CNI cache", name)
+ return nil, nil, fmt.Errorf("network %s not found in CNI cache", name)
}
cniConfList, err = libcni.ConfListFromBytes(confBytes)
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go
index bfa72808d..7197a23bf 100644
--- a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go
@@ -1,23 +1,23 @@
package util
import (
+ "fmt"
"net"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/libnetwork/util"
"github.com/containers/common/pkg/config"
pkgutil "github.com/containers/common/pkg/util"
- "github.com/pkg/errors"
)
func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet, subnetPools []config.SubnetPool) error {
if network.NetworkInterface != "" {
bridges := GetBridgeInterfaceNames(n)
if pkgutil.StringInSlice(network.NetworkInterface, bridges) {
- return errors.Errorf("bridge name %s already in use", network.NetworkInterface)
+ return fmt.Errorf("bridge name %s already in use", network.NetworkInterface)
}
if !types.NameRegex.MatchString(network.NetworkInterface) {
- return errors.Wrapf(types.RegexError, "bridge name %s invalid", network.NetworkInterface)
+ return fmt.Errorf("bridge name %s invalid: %w", network.NetworkInterface, types.RegexError)
}
} else {
var err error
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/create.go b/vendor/github.com/containers/common/libnetwork/internal/util/create.go
index d4d574065..1bd2c3279 100644
--- a/vendor/github.com/containers/common/libnetwork/internal/util/create.go
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/create.go
@@ -1,8 +1,9 @@
package util
import (
+ "fmt"
+
"github.com/containers/common/libnetwork/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -22,10 +23,10 @@ func CommonNetworkCreate(n NetUtil, network *types.Network) error {
// validate the name when given
if network.Name != "" {
if !types.NameRegex.MatchString(network.Name) {
- return errors.Wrapf(types.RegexError, "network name %s invalid", network.Name)
+ return fmt.Errorf("network name %s invalid: %w", network.Name, types.RegexError)
}
if _, err := n.Network(network.Name); err == nil {
- return errors.Wrapf(types.ErrNetworkExists, "network name %s already used", network.Name)
+ return fmt.Errorf("network name %s already used: %w", network.Name, types.ErrNetworkExists)
}
} else {
name, err = GetFreeDeviceName(n)
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/ip.go b/vendor/github.com/containers/common/libnetwork/internal/util/ip.go
index 6dc5d9325..7afc30f34 100644
--- a/vendor/github.com/containers/common/libnetwork/internal/util/ip.go
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/ip.go
@@ -2,9 +2,9 @@ package util
import (
"crypto/rand"
+ "errors"
+ "fmt"
"net"
-
- "github.com/pkg/errors"
)
func incByte(subnet *net.IPNet, idx int, shift uint) error {
@@ -31,7 +31,7 @@ func NextSubnet(subnet *net.IPNet) (*net.IPNet, error) {
}
ones, bits := newSubnet.Mask.Size()
if ones == 0 {
- return nil, errors.Errorf("%s has only one subnet", subnet.String())
+ return nil, fmt.Errorf("%s has only one subnet", subnet.String())
}
zeroes := uint(bits - ones)
shift := zeroes % 8
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/parse.go b/vendor/github.com/containers/common/libnetwork/internal/util/parse.go
index 1f68df0bb..2bda3b122 100644
--- a/vendor/github.com/containers/common/libnetwork/internal/util/parse.go
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/parse.go
@@ -1,9 +1,8 @@
package util
import (
+ "fmt"
"strconv"
-
- "github.com/pkg/errors"
)
// ParseMTU parses the mtu option
@@ -16,7 +15,7 @@ func ParseMTU(mtu string) (int, error) {
return 0, err
}
if m < 0 {
- return 0, errors.Errorf("mtu %d is less than zero", m)
+ return 0, fmt.Errorf("mtu %d is less than zero", m)
}
return m, nil
}
@@ -31,7 +30,7 @@ func ParseVlan(vlan string) (int, error) {
return 0, err
}
if v < 0 || v > 4094 {
- return 0, errors.Errorf("vlan ID %d must be between 0 and 4094", v)
+ return 0, fmt.Errorf("vlan ID %d must be between 0 and 4094", v)
}
return v, nil
}
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
index 4dd44110a..14f4052d8 100644
--- a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
@@ -1,11 +1,12 @@
package util
import (
+ "errors"
+ "fmt"
"net"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/libnetwork/util"
- "github.com/pkg/errors"
)
// ValidateSubnet will validate a given Subnet. It checks if the
@@ -25,18 +26,18 @@ func ValidateSubnet(s *types.Subnet, addGateway bool, usedNetworks []*net.IPNet)
// the network address and not a random ip in the subnet.
_, n, err := net.ParseCIDR(s.Subnet.String())
if err != nil {
- return errors.Wrap(err, "subnet invalid")
+ return fmt.Errorf("subnet invalid: %w", err)
}
// check that the new subnet does not conflict with existing ones
if NetworkIntersectsWithNetworks(n, usedNetworks) {
- return errors.Errorf("subnet %s is already used on the host or by another config", n.String())
+ return fmt.Errorf("subnet %s is already used on the host or by another config", n.String())
}
s.Subnet = types.IPNet{IPNet: *n}
if s.Gateway != nil {
if !s.Subnet.Contains(s.Gateway) {
- return errors.Errorf("gateway %s not in subnet %s", s.Gateway, &s.Subnet)
+ return fmt.Errorf("gateway %s not in subnet %s", s.Gateway, &s.Subnet)
}
util.NormalizeIP(&s.Gateway)
} else if addGateway {
@@ -50,13 +51,13 @@ func ValidateSubnet(s *types.Subnet, addGateway bool, usedNetworks []*net.IPNet)
if s.LeaseRange != nil {
if s.LeaseRange.StartIP != nil {
if !s.Subnet.Contains(s.LeaseRange.StartIP) {
- return errors.Errorf("lease range start ip %s not in subnet %s", s.LeaseRange.StartIP, &s.Subnet)
+ return fmt.Errorf("lease range start ip %s not in subnet %s", s.LeaseRange.StartIP, &s.Subnet)
}
util.NormalizeIP(&s.LeaseRange.StartIP)
}
if s.LeaseRange.EndIP != nil {
if !s.Subnet.Contains(s.LeaseRange.EndIP) {
- return errors.Errorf("lease range end ip %s not in subnet %s", s.LeaseRange.EndIP, &s.Subnet)
+ return fmt.Errorf("lease range end ip %s not in subnet %s", s.LeaseRange.EndIP, &s.Subnet)
}
util.NormalizeIP(&s.LeaseRange.EndIP)
}
@@ -107,7 +108,7 @@ func ValidateSetupOptions(n NetUtil, namespacePath string, options types.SetupOp
// validatePerNetworkOpts checks that all given static ips are in a subnet on this network
func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOptions) error {
if netOpts.InterfaceName == "" {
- return errors.Errorf("interface name on network %s is empty", network.Name)
+ return fmt.Errorf("interface name on network %s is empty", network.Name)
}
if network.IPAMOptions[types.Driver] == types.HostLocalIPAMDriver {
outer:
@@ -117,7 +118,7 @@ func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOpt
continue outer
}
}
- return errors.Errorf("requested static ip %s not in any subnet on network %s", ip.String(), network.Name)
+ return fmt.Errorf("requested static ip %s not in any subnet on network %s", ip.String(), network.Name)
}
}
return nil
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go
index d8843eb2c..647143652 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/config.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go
@@ -5,16 +5,18 @@ package netavark
import (
"encoding/json"
+ "errors"
+ "fmt"
"net"
"os"
"path/filepath"
+ "strconv"
"time"
internalutil "github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/util"
"github.com/containers/storage/pkg/stringid"
- "github.com/pkg/errors"
)
// NetworkCreate will take a partial filled Network and fill the
@@ -44,7 +46,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
// FIXME: Should we use a different type for network create without the ID field?
// the caller is not allowed to set a specific ID
if newNetwork.ID != "" {
- return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create")
+ return nil, fmt.Errorf("ID can not be set for network create: %w", types.ErrInvalidArg)
}
// generate random network ID
@@ -95,20 +97,27 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
// validate the given options; we do not need them here, but check to make sure they are valid
for key, value := range newNetwork.Options {
switch key {
- case "mtu":
+ case types.MTUOption:
_, err = internalutil.ParseMTU(value)
if err != nil {
return nil, err
}
- case "vlan":
+ case types.VLANOption:
_, err = internalutil.ParseVlan(value)
if err != nil {
return nil, err
}
+ case types.IsolateOption:
+ val, err := strconv.ParseBool(value)
+ if err != nil {
+ return nil, err
+ }
+ // rust only supports "true" or "false", while go can parse 1 and 0 as well, so we need to normalize the value
+ newNetwork.Options[types.IsolateOption] = strconv.FormatBool(val)
default:
- return nil, errors.Errorf("unsupported bridge network option %s", key)
+ return nil, fmt.Errorf("unsupported bridge network option %s", key)
}
}
case types.MacVLANNetworkDriver:
@@ -117,7 +126,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
return nil, err
}
default:
- return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver)
+ return nil, fmt.Errorf("unsupported driver %s: %w", newNetwork.Driver, types.ErrInvalidArg)
}
// when we do not have ipam we must disable dns
@@ -157,7 +166,7 @@ func createMacvlan(network *types.Network) error {
return err
}
if !util.StringInSlice(network.NetworkInterface, interfaceNames) {
- return errors.Errorf("parent interface %s does not exist", network.NetworkInterface)
+ return fmt.Errorf("parent interface %s does not exist", network.NetworkInterface)
}
}
@@ -165,29 +174,29 @@ func createMacvlan(network *types.Network) error {
switch network.IPAMOptions[types.Driver] {
case "":
if len(network.Subnets) == 0 {
- return errors.Errorf("macvlan driver needs at least one subnet specified, DHCP is not yet supported with netavark")
+ return fmt.Errorf("macvlan driver needs at least one subnet specified, DHCP is not yet supported with netavark")
}
network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver
case types.HostLocalIPAMDriver:
if len(network.Subnets) == 0 {
- return errors.Errorf("macvlan driver needs at least one subnet specified, when the host-local ipam driver is set")
+ return fmt.Errorf("macvlan driver needs at least one subnet specified, when the host-local ipam driver is set")
}
}
// validate the given options; we do not need them here, but check to make sure they are valid
for key, value := range network.Options {
switch key {
- case "mode":
+ case types.ModeOption:
if !util.StringInSlice(value, types.ValidMacVLANModes) {
- return errors.Errorf("unknown macvlan mode %q", value)
+ return fmt.Errorf("unknown macvlan mode %q", value)
}
- case "mtu":
+ case types.MTUOption:
_, err := internalutil.ParseMTU(value)
if err != nil {
return err
}
default:
- return errors.Errorf("unsupported macvlan network option %s", key)
+ return fmt.Errorf("unsupported macvlan network option %s", key)
}
}
return nil
@@ -210,7 +219,7 @@ func (n *netavarkNetwork) NetworkRemove(nameOrID string) error {
// Removing the default network is not allowed.
if network.Name == n.defaultNetwork {
- return errors.Errorf("default network %s cannot be removed", n.defaultNetwork)
+ return fmt.Errorf("default network %s cannot be removed", n.defaultNetwork)
}
file := filepath.Join(n.networkConfigDir, network.Name+".json")
@@ -274,7 +283,7 @@ func validateIPAMDriver(n *types.Network) error {
case types.DHCPIPAMDriver:
return errors.New("dhcp ipam driver is not yet supported with netavark")
default:
- return errors.Errorf("unsupported ipam driver %q", ipamDriver)
+ return fmt.Errorf("unsupported ipam driver %q", ipamDriver)
}
return nil
}
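The isolate normalization above exists because Go's strconv.ParseBool is more permissive than the Rust-side parser in netavark. A small sketch of the round-trip (the helper name is illustrative):

package main

import (
	"fmt"
	"strconv"
)

// normalizeIsolate canonicalizes any Go-parseable boolean ("1", "t", "TRUE", ...)
// to the literal "true"/"false" strings that netavark understands.
func normalizeIsolate(value string) (string, error) {
	val, err := strconv.ParseBool(value)
	if err != nil {
		return "", err
	}
	return strconv.FormatBool(val), nil
}

func main() {
	s, _ := normalizeIsolate("1")
	fmt.Println(s) // true
}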
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/exec.go b/vendor/github.com/containers/common/libnetwork/netavark/exec.go
index 65dcd5497..93c0ac364 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/exec.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/exec.go
@@ -119,6 +119,9 @@ func (n *netavarkNetwork) execNetavark(args []string, stdin, result interface{})
if logrus.IsLevelEnabled(logrus.DebugLevel) {
cmd.Env = append(cmd.Env, "RUST_BACKTRACE=1")
}
+ if n.dnsBindPort != 0 {
+ cmd.Env = append(cmd.Env, "NETAVARK_DNS_PORT="+strconv.Itoa(int(n.dnsBindPort)))
+ }
err = cmd.Start()
if err != nil {
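A hedged sketch of the environment plumbing added above: a non-zero dnsBindPort is handed to the netavark child process via NETAVARK_DNS_PORT (the invocation below is a placeholder, not the real argument list):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strconv"
)

func netavarkCmdWithDNSPort(dnsBindPort uint16) *exec.Cmd {
	cmd := exec.Command("netavark", "--help") // placeholder invocation
	cmd.Env = os.Environ()
	if dnsBindPort != 0 {
		cmd.Env = append(cmd.Env, "NETAVARK_DNS_PORT="+strconv.Itoa(int(dnsBindPort)))
	}
	return cmd
}

func main() {
	cmd := netavarkCmdWithDNSPort(1153)
	fmt.Println(cmd.Env[len(cmd.Env)-1]) // NETAVARK_DNS_PORT=1153
}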
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
index 89820f1d6..fa5800ee4 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/libnetwork/util"
- "github.com/pkg/errors"
"go.etcd.io/bbolt"
)
@@ -180,7 +179,7 @@ func getFreeIPFromBucket(bucket *bbolt.Bucket, subnet *types.Subnet) (net.IP, er
lastIP, err := util.LastIPInSubnet(&subnet.Subnet.IPNet)
// this error should never happen but let's check anyway to prevent panics
if err != nil {
- return nil, errors.Wrap(err, "failed to get lastIP")
+ return nil, fmt.Errorf("failed to get lastIP: %w", err)
}
// ipv4 uses the last ip in a subnet for broadcast so we cannot use it
if util.IsIPv4(lastIP) {
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go
index 0d03cd5e6..e3e2f7e50 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/network.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go
@@ -5,6 +5,8 @@ package netavark
import (
"encoding/json"
+ "errors"
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -16,7 +18,6 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/unshare"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -43,6 +44,9 @@ type netavarkNetwork struct {
// defaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create
defaultsubnetPools []config.SubnetPool
+ // dnsBindPort is the port to pass to netavark for aardvark
+ dnsBindPort uint16
+
// ipamDBPath is the path to the ip allocation bolt db
ipamDBPath string
@@ -80,6 +84,9 @@ type InitConfig struct {
// DefaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create
DefaultsubnetPools []config.SubnetPool
+ // DNSBindPort is the port to pass to netavark for aardvark
+ DNSBindPort uint16
+
// Syslog describes whether the netavark debug output should be logged to the syslog as well.
// This will use logrus to do so; make sure logrus is set up to log to the syslog.
Syslog bool
@@ -105,7 +112,7 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
}
defaultNet, err := types.ParseCIDR(defaultSubnet)
if err != nil {
- return nil, errors.Wrap(err, "failed to parse default subnet")
+ return nil, fmt.Errorf("failed to parse default subnet: %w", err)
}
if err := os.MkdirAll(conf.NetworkConfigDir, 0o755); err != nil {
@@ -131,6 +138,7 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
defaultNetwork: defaultNetworkName,
defaultSubnet: defaultNet,
defaultsubnetPools: defaultSubnetPools,
+ dnsBindPort: conf.DNSBindPort,
lock: lock,
syslog: conf.Syslog,
}
@@ -221,7 +229,7 @@ func (n *netavarkNetwork) loadNetworks() error {
if networks[n.defaultNetwork] == nil {
networkInfo, err := n.createDefaultNetwork()
if err != nil {
- return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork)
+ return fmt.Errorf("failed to create default network %s: %w", n.defaultNetwork, err)
}
networks[n.defaultNetwork] = networkInfo
}
@@ -242,7 +250,7 @@ func parseNetwork(network *types.Network) error {
}
if len(network.ID) != 64 {
- return errors.Errorf("invalid network ID %q", network.ID)
+ return fmt.Errorf("invalid network ID %q", network.ID)
}
// add gateway when not internal or dns enabled
@@ -284,7 +292,7 @@ func (n *netavarkNetwork) getNetwork(nameOrID string) (*types.Network, error) {
if strings.HasPrefix(val.ID, nameOrID) {
if net != nil {
- return nil, errors.Errorf("more than one result for network ID %s", nameOrID)
+ return nil, fmt.Errorf("more than one result for network ID %s", nameOrID)
}
net = val
}
@@ -292,7 +300,7 @@ func (n *netavarkNetwork) getNetwork(nameOrID string) (*types.Network, error) {
if net != nil {
return net, nil
}
- return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID)
+ return nil, fmt.Errorf("unable to find network with name or ID %s: %w", nameOrID, types.ErrNoSuchNetwork)
}
// Implement the NetUtil interface for easy code sharing with other network interfaces.
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go
index 7f0a84140..b364f42d3 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/run.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -42,7 +41,7 @@ func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions
netavarkOpts, err := n.convertNetOpts(options.NetworkOptions)
if err != nil {
- return nil, errors.Wrap(err, "failed to convert net opts")
+ return nil, fmt.Errorf("failed to convert net opts: %w", err)
}
// Warn users if one or more networks have dns enabled
@@ -103,7 +102,7 @@ func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownO
netavarkOpts, err := n.convertNetOpts(options.NetworkOptions)
if err != nil {
- return errors.Wrap(err, "failed to convert net opts")
+ return fmt.Errorf("failed to convert net opts: %w", err)
}
retErr := n.execNetavark([]string{"teardown", namespacePath}, netavarkOpts, nil)
diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go
index f41598f77..639ff4e45 100644
--- a/vendor/github.com/containers/common/libnetwork/network/interface.go
+++ b/vendor/github.com/containers/common/libnetwork/network/interface.go
@@ -84,6 +84,7 @@ func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (type
DefaultNetwork: conf.Network.DefaultNetwork,
DefaultSubnet: conf.Network.DefaultSubnet,
DefaultsubnetPools: conf.Network.DefaultSubnetPools,
+ DNSBindPort: conf.Network.DNSBindPort,
Syslog: syslog,
})
return types.Netavark, netInt, err
@@ -160,7 +161,7 @@ func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) {
confDir := conf.Network.NetworkConfigDir
if confDir == "" {
var err error
- confDir, err = getDefultCNIConfigDir()
+ confDir, err = getDefaultCNIConfigDir()
if err != nil {
return nil, err
}
@@ -175,7 +176,7 @@ func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) {
})
}
-func getDefultCNIConfigDir() (string, error) {
+func getDefaultCNIConfigDir() (string, error) {
if !unshare.IsRootless() {
return cniConfigDir, nil
}
diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go
index a1e4d71ed..da8fa31c6 100644
--- a/vendor/github.com/containers/common/libnetwork/types/const.go
+++ b/vendor/github.com/containers/common/libnetwork/types/const.go
@@ -34,6 +34,12 @@ const (
IPVLANModeL2 = "l2"
IPVLANModeL3 = "l3"
IPVLANModeL3s = "l3s"
+
+ // valid network options
+ VLANOption = "vlan"
+ MTUOption = "mtu"
+ ModeOption = "mode"
+ IsolateOption = "isolate"
)
type NetworkBackend string
diff --git a/vendor/github.com/containers/common/libnetwork/types/define.go b/vendor/github.com/containers/common/libnetwork/types/define.go
index d37e529df..f84221458 100644
--- a/vendor/github.com/containers/common/libnetwork/types/define.go
+++ b/vendor/github.com/containers/common/libnetwork/types/define.go
@@ -1,9 +1,9 @@
package types
import (
+ "errors"
+ "fmt"
"regexp"
-
- "github.com/pkg/errors"
)
var (
@@ -21,5 +21,5 @@ var (
// This must NOT be changed.
NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$")
// RegexError is thrown in presence of an invalid name.
- RegexError = errors.Wrapf(ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*")
+ RegexError = fmt.Errorf("names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*: %w", ErrInvalidArg) // nolint:revive // This lint is new and we do not want to break the API.
)
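RegexError is now itself built with %w, so an error wrapped around RegexError still unwraps all the way to ErrInvalidArg. A self-contained sketch (validateName is illustrative; the regex and messages are copied from this diff):

package main

import (
	"errors"
	"fmt"
	"regexp"
)

var (
	ErrInvalidArg = errors.New("invalid argument")
	NameRegex     = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$")
	RegexError    = fmt.Errorf("names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*: %w", ErrInvalidArg)
)

func validateName(name string) error {
	if !NameRegex.MatchString(name) {
		return fmt.Errorf("network name %s invalid: %w", name, RegexError)
	}
	return nil
}

func main() {
	err := validateName("-bad")
	fmt.Println(errors.Is(err, ErrInvalidArg)) // true: two levels of %w still unwrap
}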
diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go
index 58d79d25b..a8ef8a413 100644
--- a/vendor/github.com/containers/common/libnetwork/util/filters.go
+++ b/vendor/github.com/containers/common/libnetwork/util/filters.go
@@ -1,12 +1,12 @@
package util
import (
+ "fmt"
"strings"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/filters"
"github.com/containers/common/pkg/util"
- "github.com/pkg/errors"
)
func GenerateNetworkFilters(f map[string][]string) ([]types.FilterFunc, error) {
@@ -75,6 +75,6 @@ func createPruneFilterFuncs(key string, filterValues []string) (types.FilterFunc
return net.Created.Before(until)
}, nil
default:
- return nil, errors.Errorf("invalid filter %q", key)
+ return nil, fmt.Errorf("invalid filter %q", key)
}
}
diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
index 1fd269255..7ba63ba74 100644
--- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
+++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
@@ -6,6 +6,8 @@ package apparmor
import (
"bufio"
"bytes"
+ "errors"
+ "fmt"
"io"
"os"
"os/exec"
@@ -17,7 +19,6 @@ import (
"github.com/containers/common/pkg/apparmor/internal/supported"
"github.com/containers/storage/pkg/unshare"
runcaa "github.com/opencontainers/runc/libcontainer/apparmor"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -47,7 +48,7 @@ type profileData struct {
func (p *profileData) generateDefault(apparmorParserPath string, out io.Writer) error {
compiled, err := template.New("apparmor_profile").Parse(defaultProfileTemplate)
if err != nil {
- return errors.Wrap(err, "create AppArmor profile from template")
+ return fmt.Errorf("create AppArmor profile from template: %w", err)
}
if macroExists("tunables/global") {
@@ -62,11 +63,15 @@ func (p *profileData) generateDefault(apparmorParserPath string, out io.Writer)
ver, err := getAAParserVersion(apparmorParserPath)
if err != nil {
- return errors.Wrap(err, "get AppArmor version")
+ return fmt.Errorf("get AppArmor version: %w", err)
}
p.Version = ver
- return errors.Wrap(compiled.Execute(out, p), "execute compiled profile")
+ if err := compiled.Execute(out, p); err != nil {
+ return fmt.Errorf("execute compiled profile: %w", err)
+ }
+
+ return nil
}
// macrosExists checks if the passed macro exists.
@@ -88,19 +93,19 @@ func InstallDefault(name string) error {
apparmorParserPath, err := supported.NewAppArmorVerifier().FindAppArmorParserBinary()
if err != nil {
- return errors.Wrap(err, "find `apparmor_parser` binary")
+ return fmt.Errorf("find `apparmor_parser` binary: %w", err)
}
cmd := exec.Command(apparmorParserPath, "-Kr")
pipe, err := cmd.StdinPipe()
if err != nil {
- return errors.Wrapf(err, "execute %s", apparmorParserPath)
+ return fmt.Errorf("execute %s: %w", apparmorParserPath, err)
}
if err := cmd.Start(); err != nil {
if pipeErr := pipe.Close(); pipeErr != nil {
logrus.Errorf("Unable to close AppArmor pipe: %q", pipeErr)
}
- return errors.Wrapf(err, "start %s command", apparmorParserPath)
+ return fmt.Errorf("start %s command: %w", apparmorParserPath, err)
}
if err := p.generateDefault(apparmorParserPath, pipe); err != nil {
if pipeErr := pipe.Close(); pipeErr != nil {
@@ -109,14 +114,18 @@ func InstallDefault(name string) error {
if cmdErr := cmd.Wait(); cmdErr != nil {
logrus.Errorf("Unable to wait for AppArmor command: %q", cmdErr)
}
- return errors.Wrap(err, "generate default profile into pipe")
+ return fmt.Errorf("generate default profile into pipe: %w", err)
}
if pipeErr := pipe.Close(); pipeErr != nil {
logrus.Errorf("Unable to close AppArmor pipe: %q", pipeErr)
}
- return errors.Wrap(cmd.Wait(), "wait for AppArmor command")
+ if err := cmd.Wait(); err != nil {
+ return fmt.Errorf("wait for AppArmor command: %w", err)
+ }
+
+ return nil
}
// DefaultContent returns the default profile content as byte slice. The
@@ -128,11 +137,11 @@ func DefaultContent(name string) ([]byte, error) {
apparmorParserPath, err := supported.NewAppArmorVerifier().FindAppArmorParserBinary()
if err != nil {
- return nil, errors.Wrap(err, "find `apparmor_parser` binary")
+ return nil, fmt.Errorf("find `apparmor_parser` binary: %w", err)
}
if err := p.generateDefault(apparmorParserPath, buffer); err != nil {
- return nil, errors.Wrap(err, "generate default AppAmor profile")
+ return nil, fmt.Errorf("generate default AppAmor profile: %w", err)
}
return buffer.Bytes(), nil
}
@@ -141,15 +150,15 @@ func DefaultContent(name string) ([]byte, error) {
// kernel.
func IsLoaded(name string) (bool, error) {
if name != "" && unshare.IsRootless() {
- return false, errors.Wrapf(ErrApparmorRootless, "cannot load AppArmor profile %q", name)
+ return false, fmt.Errorf("cannot load AppArmor profile %q: %w", name, ErrApparmorRootless)
}
file, err := os.Open("/sys/kernel/security/apparmor/profiles")
if err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
return false, nil
}
- return false, errors.Wrap(err, "open AppArmor profile path")
+ return false, fmt.Errorf("open AppArmor profile path: %w", err)
}
defer file.Close()
@@ -160,7 +169,7 @@ func IsLoaded(name string) (bool, error) {
break
}
if err != nil {
- return false, errors.Wrap(err, "reading AppArmor profile")
+ return false, fmt.Errorf("reading AppArmor profile: %w", err)
}
if strings.HasPrefix(p, name+" ") {
return true, nil
@@ -177,7 +186,7 @@ func execAAParser(apparmorParserPath, dir string, args ...string) (string, error
output, err := c.Output()
if err != nil {
- return "", errors.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err)
+ return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err)
}
return string(output), nil
@@ -187,7 +196,7 @@ func execAAParser(apparmorParserPath, dir string, args ...string) (string, error
func getAAParserVersion(apparmorParserPath string) (int, error) {
output, err := execAAParser(apparmorParserPath, "", "--version")
if err != nil {
- return -1, errors.Wrap(err, "execute apparmor_parser")
+ return -1, fmt.Errorf("execute apparmor_parser: %w", err)
}
return parseAAParserVersion(output)
}
@@ -206,7 +215,7 @@ func parseAAParserVersion(output string) (int, error) {
// split by major minor version
v := strings.Split(version, ".")
if len(v) == 0 || len(v) > 3 {
- return -1, errors.Errorf("parsing version failed for output: `%s`", output)
+ return -1, fmt.Errorf("parsing version failed for output: `%s`", output)
}
// Default the versions to 0.
@@ -214,19 +223,19 @@ func parseAAParserVersion(output string) (int, error) {
majorVersion, err := strconv.Atoi(v[0])
if err != nil {
- return -1, errors.Wrap(err, "convert AppArmor major version")
+ return -1, fmt.Errorf("convert AppArmor major version: %w", err)
}
if len(v) > 1 {
minorVersion, err = strconv.Atoi(v[1])
if err != nil {
- return -1, errors.Wrap(err, "convert AppArmor minor version")
+ return -1, fmt.Errorf("convert AppArmor minor version: %w", err)
}
}
if len(v) > 2 {
patchLevel, err = strconv.Atoi(v[2])
if err != nil {
- return -1, errors.Wrap(err, "convert AppArmor patch version")
+ return -1, fmt.Errorf("convert AppArmor patch version: %w", err)
}
}
@@ -250,7 +259,7 @@ func CheckProfileAndLoadDefault(name string) (string, error) {
// privileges. Return an error in case a specific profile is specified.
if unshare.IsRootless() {
if name != "" {
- return "", errors.Wrapf(ErrApparmorRootless, "cannot load AppArmor profile %q", name)
+ return "", fmt.Errorf("cannot load AppArmor profile %q: %w", name, ErrApparmorRootless)
}
logrus.Debug("Skipping loading default AppArmor profile (rootless mode)")
return "", nil
@@ -261,7 +270,7 @@ func CheckProfileAndLoadDefault(name string) (string, error) {
if name == "" {
return "", nil
}
- return "", errors.Errorf("profile %q specified but AppArmor is disabled on the host", name)
+ return "", fmt.Errorf("profile %q specified but AppArmor is disabled on the host", name)
}
if name == "" {
@@ -271,10 +280,10 @@ func CheckProfileAndLoadDefault(name string) (string, error) {
// name.
isLoaded, err := IsLoaded(name)
if err != nil {
- return "", errors.Wrapf(err, "verify if profile %s is loaded", name)
+ return "", fmt.Errorf("verify if profile %s is loaded: %w", name, err)
}
if !isLoaded {
- return "", errors.Errorf("AppArmor profile %q specified but not loaded", name)
+ return "", fmt.Errorf("AppArmor profile %q specified but not loaded", name)
}
return name, nil
}
@@ -283,12 +292,12 @@ func CheckProfileAndLoadDefault(name string) (string, error) {
// if it's loaded before installing it.
isLoaded, err := IsLoaded(name)
if err != nil {
- return "", errors.Wrapf(err, "verify if profile %s is loaded", name)
+ return "", fmt.Errorf("verify if profile %s is loaded: %w", name, err)
}
if !isLoaded {
err = InstallDefault(name)
if err != nil {
- return "", errors.Wrapf(err, "install profile %s", name)
+ return "", fmt.Errorf("install profile %s: %w", name, err)
}
logrus.Infof("Successfully loaded AppAmor profile %q", name)
} else {
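The os.IsNotExist to errors.Is(err, os.ErrNotExist) swaps throughout this bump are not purely cosmetic: os.IsNotExist does not see through %w wrapping, while errors.Is walks the whole chain. A quick demonstration:

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/nonexistent/path")
	wrapped := fmt.Errorf("open AppArmor profile path: %w", err)

	fmt.Println(os.IsNotExist(wrapped))             // false: does not unwrap fmt wrappers
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: walks the wrap chain
}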
diff --git a/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go b/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go
index 778f4e3a2..1ee44156d 100644
--- a/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go
+++ b/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go
@@ -1,6 +1,8 @@
package supported
import (
+ "errors"
+ "fmt"
"os"
"os/exec"
"path/filepath"
@@ -8,7 +10,6 @@ import (
"github.com/containers/storage/pkg/unshare"
runcaa "github.com/opencontainers/runc/libcontainer/apparmor"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -81,7 +82,7 @@ func (a *ApparmorVerifier) FindAppArmorParserBinary() (string, error) {
return path, nil
}
- return "", errors.Errorf(
+ return "", fmt.Errorf(
"%s binary neither found in %s nor $PATH", binary, sbin,
)
}
diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go
index 188e06c12..770dc814d 100644
--- a/vendor/github.com/containers/common/pkg/auth/auth.go
+++ b/vendor/github.com/containers/common/pkg/auth/auth.go
@@ -3,6 +3,7 @@ package auth
import (
"bufio"
"context"
+ "errors"
"fmt"
"net/url"
"os"
@@ -14,7 +15,6 @@ import (
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
terminal "golang.org/x/term"
)
@@ -39,7 +39,7 @@ func CheckAuthFile(authfile string) error {
return nil
}
if _, err := os.Stat(authfile); err != nil {
- return errors.Wrap(err, "checking authfile")
+ return fmt.Errorf("checking authfile: %w", err)
}
return nil
}
@@ -97,12 +97,12 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
authConfig, err := config.GetCredentials(systemContext, key)
if err != nil {
- return errors.Wrap(err, "get credentials")
+ return fmt.Errorf("get credentials: %w", err)
}
if opts.GetLoginSet {
if authConfig.Username == "" {
- return errors.Errorf("not logged into %s", key)
+ return fmt.Errorf("not logged into %s", key)
}
fmt.Fprintf(opts.Stdout, "%s\n", authConfig.Username)
return nil
@@ -139,7 +139,7 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
username, password, err := getUserAndPass(opts, password, authConfig.Username)
if err != nil {
- return errors.Wrap(err, "getting username and password")
+ return fmt.Errorf("getting username and password: %w", err)
}
if err = docker.CheckAuth(ctx, systemContext, username, password, registry); err == nil {
@@ -158,9 +158,9 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
}
if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
logrus.Debugf("error logging into %q: %v", key, unauthorized)
- return errors.Errorf("error logging into %q: invalid username/password", key)
+ return fmt.Errorf("error logging into %q: invalid username/password", key)
}
- return errors.Wrapf(err, "authenticating creds for %q", key)
+ return fmt.Errorf("authenticating creds for %q: %w", key, err)
}
// parseCredentialsKey turns the provided argument into a valid credential key
@@ -191,10 +191,10 @@ func parseCredentialsKey(arg string, acceptRepositories bool) (key, registry str
// Ideally c/image should provide dedicated validation functionality.
ref, err := reference.ParseNormalizedNamed(key)
if err != nil {
- return "", "", errors.Wrapf(err, "parse reference from %q", key)
+ return "", "", fmt.Errorf("parse reference from %q: %w", key, err)
}
if !reference.IsNameOnly(ref) {
- return "", "", errors.Errorf("reference %q contains tag or digest", ref.String())
+ return "", "", fmt.Errorf("reference %q contains tag or digest", ref.String())
}
refRegistry := reference.Domain(ref)
if refRegistry != registry { // This should never happen, check just to make sure
@@ -232,7 +232,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user
}
username, err = reader.ReadString('\n')
if err != nil {
- return "", "", errors.Wrap(err, "reading username")
+ return "", "", fmt.Errorf("reading username: %w", err)
}
// If the user just hit enter, use the displayed user from the
// authentication file. This allows doing a lazy
@@ -246,7 +246,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user
fmt.Fprint(opts.Stdout, "Password: ")
pass, err := terminal.ReadPassword(int(os.Stdin.Fd()))
if err != nil {
- return "", "", errors.Wrap(err, "reading password")
+ return "", "", fmt.Errorf("reading password: %w", err)
}
password = string(pass)
fmt.Fprintln(opts.Stdout)
@@ -298,14 +298,15 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
}
err = config.RemoveAuthentication(systemContext, key)
- switch errors.Cause(err) {
- case nil:
+ if err == nil {
fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", key)
return nil
- case config.ErrNotLoggedIn:
+ }
+
+ if errors.Is(err, config.ErrNotLoggedIn) {
authConfig, err := config.GetCredentials(systemContext, key)
if err != nil {
- return errors.Wrap(err, "get credentials")
+ return fmt.Errorf("get credentials: %w", err)
}
authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, registry)
@@ -313,10 +314,10 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", key)
return nil
}
- return errors.Errorf("not logged into %s", key)
- default:
- return errors.Wrapf(err, "logging out of %q", key)
+ return fmt.Errorf("not logged into %s", key)
}
+
+ return fmt.Errorf("logging out of %q: %w", key, err)
}
// defaultRegistryWhenUnspecified returns first registry from search list of registry.conf
@@ -324,7 +325,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
func defaultRegistryWhenUnspecified(systemContext *types.SystemContext) (string, error) {
registriesFromFile, err := sysregistriesv2.UnqualifiedSearchRegistries(systemContext)
if err != nil {
- return "", errors.Wrap(err, "getting registry from registry.conf, please specify a registry")
+ return "", fmt.Errorf("getting registry from registry.conf, please specify a registry: %w", err)
}
if len(registriesFromFile) == 0 {
return "", errors.New("no registries found in registries.conf, a registry must be provided")
diff --git a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go
index b8e3fbcb5..3bf25e086 100644
--- a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go
+++ b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go
@@ -6,11 +6,12 @@ package capabilities
// changed significantly to fit the needs of libpod.
import (
+ "errors"
+ "fmt"
"sort"
"strings"
"sync"
- "github.com/pkg/errors"
"github.com/syndtr/gocapability/capability"
)
@@ -115,7 +116,7 @@ func NormalizeCapabilities(caps []string) ([]string, error) {
c = "CAP_" + c
}
if !stringInSlice(c, capabilityList) {
- return nil, errors.Wrapf(ErrUnknownCapability, "%q", c)
+ return nil, fmt.Errorf("%q: %w", c, ErrUnknownCapability)
}
normalized = append(normalized, c)
}
@@ -127,7 +128,7 @@ func NormalizeCapabilities(caps []string) ([]string, error) {
func ValidateCapabilities(caps []string) error {
for _, c := range caps {
if !stringInSlice(c, capabilityList) {
- return errors.Wrapf(ErrUnknownCapability, "%q", c)
+ return fmt.Errorf("%q: %w", c, ErrUnknownCapability)
}
}
return nil
@@ -176,14 +177,14 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) {
} else {
for _, add := range capAdd {
if stringInSlice(add, capDrop) {
- return nil, errors.Errorf("capability %q cannot be dropped and added", add)
+ return nil, fmt.Errorf("capability %q cannot be dropped and added", add)
}
}
}
for _, drop := range capDrop {
if stringInSlice(drop, capAdd) {
- return nil, errors.Errorf("capability %q cannot be dropped and added", drop)
+ return nil, fmt.Errorf("capability %q cannot be dropped and added", drop)
}
}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio.go b/vendor/github.com/containers/common/pkg/cgroups/blkio.go
index a72a641c8..e157e6faf 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/blkio.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/blkio.go
@@ -5,6 +5,7 @@ package cgroups
import (
"bufio"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -12,7 +13,6 @@ import (
"strings"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
type blkioHandler struct{}
@@ -101,10 +101,10 @@ func (c *blkioHandler) Stat(ctr *CgroupControl, m *Metrics) error {
p := filepath.Join(BlkioRoot, "blkio.throttle.io_service_bytes_recursive")
f, err := os.Open(p)
if err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
return nil
}
- return errors.Wrapf(err, "open %s", p)
+ return fmt.Errorf("open %s: %w", p, err)
}
defer f.Close()
@@ -143,7 +143,7 @@ func (c *blkioHandler) Stat(ctr *CgroupControl, m *Metrics) error {
ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry)
}
if err := scanner.Err(); err != nil {
- return errors.Wrapf(err, "parse %s", p)
+ return fmt.Errorf("parse %s: %w", p, err)
}
}
m.Blkio = BlkioMetrics{IoServiceBytesRecursive: ioServiceBytesRecursive}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go b/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go
index 98b8ae541..ced461e69 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go
@@ -5,6 +5,8 @@ package cgroups
import (
"bufio"
+ "errors"
+ "fmt"
"os"
"path/filepath"
"strconv"
@@ -14,7 +16,6 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups/fs"
"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
"github.com/opencontainers/runc/libcontainer/configs"
- "github.com/pkg/errors"
)
type linuxBlkioHandler struct {
@@ -111,10 +112,10 @@ func (c *linuxBlkioHandler) Stat(ctr *CgroupControl, m *cgroups.Stats) error {
p := filepath.Join(BlkioRoot, "blkio.throttle.io_service_bytes_recursive")
f, err := os.Open(p)
if err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
return nil
}
- return errors.Wrapf(err, "open %s", p)
+ return fmt.Errorf("open %s: %w", p, err)
}
defer f.Close()
@@ -153,7 +154,7 @@ func (c *linuxBlkioHandler) Stat(ctr *CgroupControl, m *cgroups.Stats) error {
ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry)
}
if err := scanner.Err(); err != nil {
- return errors.Wrapf(err, "parse %s", p)
+ return fmt.Errorf("parse %s: %w", p, err)
}
}
m.BlkioStats.IoServiceBytesRecursive = ioServiceBytesRecursive
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go
index eb903d3c3..9c93618df 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go
@@ -6,6 +6,7 @@ package cgroups
import (
"bufio"
"context"
+ "errors"
"fmt"
"io/ioutil"
"math"
@@ -18,7 +19,6 @@ import (
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
"github.com/godbus/dbus/v5"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -145,7 +145,7 @@ func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool)
}
controllersFileBytes, err := ioutil.ReadFile(controllersFile)
if err != nil {
- return nil, errors.Wrapf(err, "failed while reading controllers for cgroup v2 from %q", controllersFile)
+ return nil, fmt.Errorf("failed while reading controllers for cgroup v2 from %q: %w", controllersFile, err)
}
for _, controllerName := range strings.Fields(string(controllersFileBytes)) {
c := controller{
@@ -264,7 +264,7 @@ func (c *CgroupControl) initialize() (err error) {
}()
if c.cgroup2 {
if err := createCgroupv2Path(filepath.Join(cgroupRoot, c.path)); err != nil {
- return errors.Wrapf(err, "error creating cgroup path %s", c.path)
+ return fmt.Errorf("error creating cgroup path %s: %w", c.path, err)
}
}
for name, handler := range handlers {
@@ -285,7 +285,7 @@ func (c *CgroupControl) initialize() (err error) {
}
path := c.getCgroupv1Path(ctr.name)
if err := os.MkdirAll(path, 0o755); err != nil {
- return errors.Wrapf(err, "error creating cgroup path for %s", ctr.name)
+ return fmt.Errorf("error creating cgroup path for %s: %w", ctr.name, err)
}
}
}
@@ -304,7 +304,7 @@ func readFileAsUint64(path string) (uint64, error) {
}
ret, err := strconv.ParseUint(v, 10, 64)
if err != nil {
- return ret, errors.Wrapf(err, "parse %s from %s", v, path)
+ return ret, fmt.Errorf("parse %s from %s: %w", v, path, err)
}
return ret, nil
}
@@ -323,7 +323,7 @@ func readFileByKeyAsUint64(path, key string) (uint64, error) {
}
ret, err := strconv.ParseUint(v, 10, 64)
if err != nil {
- return ret, errors.Wrapf(err, "parse %s from %s", v, path)
+ return ret, fmt.Errorf("parse %s from %s: %w", v, path, err)
}
return ret, nil
}
@@ -498,7 +498,7 @@ func (c *CgroupControl) DeleteByPathConn(path string, conn *systemdDbus.Conn) er
}
p := c.getCgroupv1Path(ctr.name)
if err := rmDirRecursively(p); err != nil {
- lastError = errors.Wrapf(err, "remove %s", p)
+ lastError = fmt.Errorf("remove %s: %w", p, err)
}
}
return lastError
@@ -534,7 +534,7 @@ func (c *CgroupControl) AddPid(pid int) error {
if c.cgroup2 {
p := filepath.Join(cgroupRoot, c.path, "cgroup.procs")
if err := ioutil.WriteFile(p, pidString, 0o644); err != nil {
- return errors.Wrapf(err, "write %s", p)
+ return fmt.Errorf("write %s: %w", p, err)
}
return nil
}
@@ -557,7 +557,7 @@ func (c *CgroupControl) AddPid(pid int) error {
}
p := filepath.Join(c.getCgroupv1Path(n), "tasks")
if err := ioutil.WriteFile(p, pidString, 0o644); err != nil {
- return errors.Wrapf(err, "write %s", p)
+ return fmt.Errorf("write %s: %w", p, err)
}
}
return nil
@@ -569,7 +569,7 @@ func (c *CgroupControl) Stat() (*Metrics, error) {
found := false
for _, h := range handlers {
if err := h.Stat(c, &m); err != nil {
- if !os.IsNotExist(errors.Cause(err)) {
+ if !errors.Is(err, os.ErrNotExist) {
return nil, err
}
logrus.Warningf("Failed to retrieve cgroup stats: %v", err)
@@ -587,10 +587,10 @@ func readCgroup2MapPath(path string) (map[string][]string, error) {
ret := map[string][]string{}
f, err := os.Open(path)
if err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
return ret, nil
}
- return nil, errors.Wrapf(err, "open file %s", path)
+ return nil, fmt.Errorf("open file %s: %w", path, err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
@@ -603,7 +603,7 @@ func readCgroup2MapPath(path string) (map[string][]string, error) {
ret[parts[0]] = parts[1:]
}
if err := scanner.Err(); err != nil {
- return nil, errors.Wrapf(err, "parsing file %s", path)
+ return nil, fmt.Errorf("parsing file %s: %w", path, err)
}
return ret, nil
}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go
index 4b72014bf..45f7bde29 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go
@@ -6,6 +6,7 @@ package cgroups
import (
"bufio"
"context"
+ "errors"
"fmt"
"io/ioutil"
"math"
@@ -20,7 +21,6 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
"github.com/opencontainers/runc/libcontainer/configs"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -98,7 +98,7 @@ func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool)
}
controllersFileBytes, err := ioutil.ReadFile(controllersFile)
if err != nil {
- return nil, errors.Wrapf(err, "failed while reading controllers for cgroup v2 from %q", controllersFile)
+ return nil, fmt.Errorf("failed while reading controllers for cgroup v2 from %q: %w", controllersFile, err)
}
for _, controllerName := range strings.Fields(string(controllersFileBytes)) {
c := controller{
@@ -217,7 +217,7 @@ func (c *CgroupControl) initialize() (err error) {
}()
if c.cgroup2 {
if err := createCgroupv2Path(filepath.Join(cgroupRoot, c.config.Path)); err != nil {
- return errors.Wrapf(err, "error creating cgroup path %s", c.config.Path)
+ return fmt.Errorf("error creating cgroup path %s: %w", c.config.Path, err)
}
}
for name, handler := range handlers {
@@ -238,7 +238,7 @@ func (c *CgroupControl) initialize() (err error) {
}
path := c.getCgroupv1Path(ctr.name)
if err := os.MkdirAll(path, 0o755); err != nil {
- return errors.Wrapf(err, "error creating cgroup path for %s", ctr.name)
+ return fmt.Errorf("error creating cgroup path for %s: %w", ctr.name, err)
}
}
}
@@ -257,7 +257,7 @@ func readFileAsUint64(path string) (uint64, error) {
}
ret, err := strconv.ParseUint(v, 10, 64)
if err != nil {
- return ret, errors.Wrapf(err, "parse %s from %s", v, path)
+ return ret, fmt.Errorf("parse %s from %s: %w", v, path, err)
}
return ret, nil
}
@@ -276,7 +276,7 @@ func readFileByKeyAsUint64(path, key string) (uint64, error) {
}
ret, err := strconv.ParseUint(v, 10, 64)
if err != nil {
- return ret, errors.Wrapf(err, "parse %s from %s", v, path)
+ return ret, fmt.Errorf("parse %s from %s: %w", v, path, err)
}
return ret, nil
}
@@ -461,7 +461,7 @@ func (c *CgroupControl) DeleteByPathConn(path string, conn *systemdDbus.Conn) er
}
p := c.getCgroupv1Path(ctr.name)
if err := rmDirRecursively(p); err != nil {
- lastError = errors.Wrapf(err, "remove %s", p)
+ lastError = fmt.Errorf("remove %s: %w", p, err)
}
}
return lastError
@@ -517,7 +517,7 @@ func (c *CgroupControl) AddPid(pid int) error {
}
p := filepath.Join(c.getCgroupv1Path(n), "tasks")
if err := ioutil.WriteFile(p, pidString, 0o644); err != nil {
- return errors.Wrapf(err, "write %s", p)
+ return fmt.Errorf("write %s: %w", p, err)
}
}
return nil
@@ -529,7 +529,7 @@ func (c *CgroupControl) Stat() (*cgroups.Stats, error) {
found := false
for _, h := range handlers {
if err := h.Stat(c, &m); err != nil {
- if !os.IsNotExist(errors.Cause(err)) {
+ if !errors.Is(err, os.ErrNotExist) {
return nil, err
}
logrus.Warningf("Failed to retrieve cgroup stats: %v", err)
@@ -547,10 +547,10 @@ func readCgroup2MapPath(path string) (map[string][]string, error) {
ret := map[string][]string{}
f, err := os.Open(path)
if err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
return ret, nil
}
- return nil, errors.Wrapf(err, "open file %s", path)
+ return nil, fmt.Errorf("open file %s: %w", path, err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
@@ -563,7 +563,7 @@ func readCgroup2MapPath(path string) (map[string][]string, error) {
ret[parts[0]] = parts[1:]
}
if err := scanner.Err(); err != nil {
- return nil, errors.Wrapf(err, "parsing file %s", path)
+ return nil, fmt.Errorf("parsing file %s: %w", path, err)
}
return ret, nil
}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go
index 3e7653672..419bc4ec3 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go
@@ -5,6 +5,7 @@ package cgroups
import (
"bufio"
+ "errors"
"fmt"
"io/ioutil"
"os"
@@ -15,7 +16,6 @@ import (
"syscall"
"time"
- "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@@ -88,7 +88,7 @@ func UserOwnsCurrentSystemdCgroup() (bool, error) {
}
}
if err := scanner.Err(); err != nil {
- return false, errors.Wrapf(err, "parsing file /proc/self/cgroup")
+ return false, fmt.Errorf("parsing file /proc/self/cgroup: %w", err)
}
return true, nil
}
@@ -113,7 +113,7 @@ func rmDirRecursively(path string) error {
}
}
- if err := os.Remove(path); err == nil || os.IsNotExist(err) {
+ if err := os.Remove(path); err == nil || errors.Is(err, os.ErrNotExist) {
return nil
}
entries, err := ioutil.ReadDir(path)
@@ -131,7 +131,7 @@ func rmDirRecursively(path string) error {
attempts := 0
for {
err := os.Remove(path)
- if err == nil || os.IsNotExist(err) {
+ if err == nil || errors.Is(err, os.ErrNotExist) {
return nil
}
if errors.Is(err, unix.EBUSY) {
@@ -150,6 +150,6 @@ func rmDirRecursively(path string) error {
continue
}
}
- return errors.Wrapf(err, "remove %s", path)
+ return fmt.Errorf("remove %s: %w", path, err)
}
}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpu.go b/vendor/github.com/containers/common/pkg/cgroups/cpu.go
index fff76b9e2..16293e74c 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/cpu.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/cpu.go
@@ -4,12 +4,12 @@
package cgroups
import (
+ "errors"
"fmt"
"os"
"strconv"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
type cpuHandler struct{}
@@ -66,21 +66,21 @@ func (c *cpuHandler) Stat(ctr *CgroupControl, m *Metrics) error {
} else {
usage.Total, err = readAcct(ctr, "cpuacct.usage")
if err != nil {
- if !os.IsNotExist(errors.Cause(err)) {
+ if !errors.Is(err, os.ErrNotExist) {
return err
}
usage.Total = 0
}
usage.Kernel, err = readAcct(ctr, "cpuacct.usage_sys")
if err != nil {
- if !os.IsNotExist(errors.Cause(err)) {
+ if !errors.Is(err, os.ErrNotExist) {
return err
}
usage.Kernel = 0
}
usage.PerCPU, err = readAcctList(ctr, "cpuacct.usage_percpu")
if err != nil {
- if !os.IsNotExist(errors.Cause(err)) {
+ if !errors.Is(err, os.ErrNotExist) {
return err
}
usage.PerCPU = nil
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go
index bca55575c..4931be6ef 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go
@@ -4,6 +4,7 @@
package cgroups
import (
+ "errors"
"os"
"path/filepath"
"strconv"
@@ -12,7 +13,6 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups/fs"
"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
"github.com/opencontainers/runc/libcontainer/configs"
- "github.com/pkg/errors"
)
type linuxCPUHandler struct {
@@ -75,21 +75,21 @@ func (c *linuxCPUHandler) Stat(ctr *CgroupControl, m *cgroups.Stats) error {
} else {
cpu.CpuUsage.TotalUsage, err = readAcct(ctr, "cpuacct.usage")
if err != nil {
- if !os.IsNotExist(errors.Cause(err)) {
+ if !errors.Is(err, os.ErrNotExist) {
return err
}
cpu.CpuUsage.TotalUsage = 0
}
cpu.CpuUsage.UsageInKernelmode, err = readAcct(ctr, "cpuacct.usage_sys")
if err != nil {
- if !os.IsNotExist(errors.Cause(err)) {
+ if !errors.Is(err, os.ErrNotExist) {
return err
}
cpu.CpuUsage.UsageInKernelmode = 0
}
cpu.CpuUsage.PercpuUsage, err = readAcctList(ctr, "cpuacct.usage_percpu")
if err != nil {
- if !os.IsNotExist(errors.Cause(err)) {
+ if !errors.Is(err, os.ErrNotExist) {
return err
}
cpu.CpuUsage.PercpuUsage = nil
diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go
index ee9f584de..e0f73b4e7 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go
@@ -14,6 +14,11 @@ import (
"github.com/opencontainers/runc/libcontainer/configs"
)
+type BlkioDev struct {
+ Device string
+ Bytes uint64
+}
+
func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Conn) error {
slice, name := filepath.Split(path)
slice = strings.TrimSuffix(slice, "/")
@@ -24,11 +29,18 @@ func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Con
systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)),
systemdDbus.PropWants(slice),
}
+ ioString := ""
+ v2, _ := IsCgroup2UnifiedMode()
+ if v2 {
+ ioString = "IOAccounting"
+ } else {
+ ioString = "BlockIOAccounting"
+ }
pMap := map[string]bool{
"DefaultDependencies": false,
"MemoryAccounting": true,
"CPUAccounting": true,
- "BlockIOAccounting": true,
+ ioString: true,
}
if i == 0 {
pMap["Delegate"] = true
@@ -42,7 +54,7 @@ func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Con
properties = append(properties, p)
}
- uMap, sMap, bMap, iMap := resourcesToProps(resources)
+ uMap, sMap, bMap, iMap, structMap := resourcesToProps(resources, v2)
for k, v := range uMap {
p := systemdDbus.Property{
Name: k,
@@ -75,6 +87,14 @@ func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Con
properties = append(properties, p)
}
+ for k, v := range structMap {
+ p := systemdDbus.Property{
+ Name: k,
+ Value: dbus.MakeVariant(v),
+ }
+ properties = append(properties, p)
+ }
+
ch := make(chan string)
_, err := c.StartTransientUnitContext(context.TODO(), name, "replace", properties, ch)
if err != nil {
@@ -117,12 +137,13 @@ func systemdDestroyConn(path string, c *systemdDbus.Conn) error {
return nil
}
-func resourcesToProps(res *configs.Resources) (map[string]uint64, map[string]string, map[string][]byte, map[string]int64) {
+func resourcesToProps(res *configs.Resources, v2 bool) (map[string]uint64, map[string]string, map[string][]byte, map[string]int64, map[string][]BlkioDev) {
bMap := make(map[string][]byte)
// this map is not used yet but will be once more resource limits are added
sMap := make(map[string]string)
iMap := make(map[string]int64)
uMap := make(map[string]uint64)
+ structMap := make(map[string][]BlkioDev)
// CPU
if res.CpuPeriod != 0 {
@@ -140,6 +161,17 @@ func resourcesToProps(res *configs.Resources) (map[string]uint64, map[string]str
uMap["CPUQuotaPerSecUSec"] = cpuQuotaPerSecUSec
}
+ if res.CpuShares != 0 {
+ // convert from shares to weight. weight only supports 1-10000
+ v2, _ := IsCgroup2UnifiedMode()
+ if v2 {
+ wt := (1 + ((res.CpuShares-2)*9999)/262142)
+ uMap["CPUWeight"] = wt
+ } else {
+ uMap["CPUShares"] = res.CpuShares
+ }
+ }
+
// CPUSet
if res.CpusetCpus != "" {
bits := []byte(res.CpusetCpus)
@@ -155,7 +187,16 @@ func resourcesToProps(res *configs.Resources) (map[string]uint64, map[string]str
uMap["MemoryMax"] = uint64(res.Memory)
}
if res.MemorySwap != 0 {
- uMap["MemorySwapMax"] = uint64(res.MemorySwap)
+ switch {
+ case res.Memory == -1 || res.MemorySwap == -1:
+ swap := -1
+ uMap["MemorySwapMax"] = uint64(swap)
+ case v2:
+ // OCI MemorySwap is memory + swap, so subtract the memory limit to get the swap-only max
+ uMap["MemorySwapMax"] = uint64(res.MemorySwap - res.Memory)
+ default:
+ uMap["MemorySwapMax"] = uint64(res.MemorySwap)
+ }
}
// Blkio
@@ -163,5 +204,51 @@ func resourcesToProps(res *configs.Resources) (map[string]uint64, map[string]str
uMap["BlockIOWeight"] = uint64(res.BlkioWeight)
}
- return uMap, sMap, bMap, iMap
+ // systemd requires the paths to be in the form /dev/{block, char}/major:minor
+ // this is difficult since runc's resources only store the major and minor, not the type of device
+ // therefore, assume all of them are block devices, which is the case for blkio throttling
+ if res.BlkioThrottleReadBpsDevice != nil {
+ for _, entry := range res.BlkioThrottleReadBpsDevice {
+ newThrottle := BlkioDev{
+ Device: fmt.Sprintf("/dev/block/%d:%d", entry.Major, entry.Minor),
+ Bytes: entry.Rate,
+ }
+ if v2 {
+ structMap["IOReadBandwidthMax"] = append(structMap["IOReadBandwidthMax"], newThrottle)
+ } else {
+ structMap["BlockIOReadBandwidth"] = append(structMap["BlockIOReadBandwidth"], newThrottle)
+ }
+ }
+ }
+
+ if res.BlkioThrottleWriteBpsDevice != nil {
+ for _, entry := range res.BlkioThrottleWriteBpsDevice {
+ newThrottle := BlkioDev{
+ Device: fmt.Sprintf("/dev/block/%d:%d", entry.Major, entry.Minor),
+ Bytes: entry.Rate,
+ }
+ if v2 {
+ structMap["IOWriteBandwidthMax"] = append(structMap["IOWriteBandwidthMax"], newThrottle)
+ } else {
+ structMap["BlockIOWriteBandwidth"] = append(structMap["BlockIOWriteBandwidth"], newThrottle)
+ }
+ }
+ }
+
+ if res.BlkioWeightDevice != nil {
+ for _, entry := range res.BlkioWeightDevice {
+ newWeight := BlkioDev{
+ Device: fmt.Sprintf("/dev/block/%d:%d", entry.Major, entry.Minor),
+ Bytes: uint64(entry.Weight),
+ }
+ if v2 {
+ structMap["IODeviceWeight"] = append(structMap["IODeviceWeight"], newWeight)
+ } else {
+ structMap["BlockIODeviceWeight"] = append(structMap["BlockIODeviceWeight"], newWeight)
+ }
+
+ }
+ }
+
+ return uMap, sMap, bMap, iMap, structMap
}
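
Two conversions in this file are easy to misread, so here is a standalone sketch (not from the patch) that exercises them with the same constants: cgroup v1 `cpu.shares` in [2, 262144] maps linearly onto cgroup v2 `cpu.weight` in [1, 10000], and on v2 the OCI memory+swap value has the memory limit subtracted to produce the swap-only `MemorySwapMax`:

```go
package main

import "fmt"

// sharesToWeight mirrors the conversion in the hunk above.
func sharesToWeight(shares uint64) uint64 {
	return 1 + ((shares-2)*9999)/262142
}

func main() {
	fmt.Println(sharesToWeight(2))      // 1 (minimum shares -> minimum weight)
	fmt.Println(sharesToWeight(1024))   // 39 (the kernel default of 1024 shares)
	fmt.Println(sharesToWeight(262144)) // 10000 (maximum shares -> maximum weight)

	// The OCI MemorySwap value is memory + swap, so the swap-only
	// limit systemd wants on cgroup v2 is the difference.
	memory, memorySwap := int64(1<<30), int64(3<<30)
	fmt.Println(memorySwap - memory) // 2147483648, i.e. 2 GiB of swap
}
```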
diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils.go b/vendor/github.com/containers/common/pkg/cgroups/utils.go
index 1fd45f40b..c7f86d7e1 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/utils.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/utils.go
@@ -2,14 +2,13 @@ package cgroups
import (
"bytes"
+ "errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
-
- "github.com/pkg/errors"
)
var TestMode bool
@@ -27,7 +26,7 @@ func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) {
p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name)
data, err := ioutil.ReadFile(p)
if err != nil {
- return nil, errors.Wrapf(err, "reading %s", p)
+ return nil, fmt.Errorf("reading %s: %w", p, err)
}
r := []uint64{}
for _, s := range strings.Split(string(data), " ") {
@@ -37,7 +36,7 @@ func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) {
}
v, err := strconv.ParseUint(s, 10, 64)
if err != nil {
- return nil, errors.Wrapf(err, "parsing %s", s)
+ return nil, fmt.Errorf("parsing %s: %w", s, err)
}
r = append(r, v)
}
@@ -93,7 +92,7 @@ func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) {
}
data, err := ioutil.ReadFile(parentPath)
if err != nil {
- return nil, errors.Wrapf(err, "open %s", path)
+ return nil, fmt.Errorf("open %s: %w", path, err)
}
if strings.Trim(string(data), "\n") != "" {
return data, nil
@@ -103,7 +102,7 @@ func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) {
return nil, err
}
if err := ioutil.WriteFile(path, data, 0o644); err != nil {
- return nil, errors.Wrapf(err, "write %s", path)
+ return nil, fmt.Errorf("write %s: %w", path, err)
}
return data, nil
}
@@ -165,12 +164,12 @@ func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) {
return false, nil
}
- if !os.IsNotExist(err) {
+ if !errors.Is(err, os.ErrNotExist) {
return false, err
}
if err := os.MkdirAll(cPath, 0o755); err != nil {
- return false, errors.Wrapf(err, "error creating cgroup for %s", controller)
+ return false, fmt.Errorf("error creating cgroup for %s: %w", controller, err)
}
return true, nil
}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go
index bd37042cd..e4bd4d5df 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go
@@ -5,6 +5,7 @@ package cgroups
import (
"bytes"
+ "errors"
"fmt"
"os"
"path"
@@ -13,7 +14,6 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
diff --git a/vendor/github.com/containers/common/pkg/chown/chown_unix.go b/vendor/github.com/containers/common/pkg/chown/chown_unix.go
index 61327fd24..be4b8cfa5 100644
--- a/vendor/github.com/containers/common/pkg/chown/chown_unix.go
+++ b/vendor/github.com/containers/common/pkg/chown/chown_unix.go
@@ -4,11 +4,10 @@
package chown
import (
+ "fmt"
"os"
"path/filepath"
"syscall"
-
- "github.com/pkg/errors"
)
// ChangeHostPathOwnership changes the uid and gid ownership of a directory or file within the host.
@@ -17,11 +16,11 @@ func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error {
// Validate if host path can be chowned
isDangerous, err := DangerousHostPath(path)
if err != nil {
- return errors.Wrap(err, "failed to validate if host path is dangerous")
+ return fmt.Errorf("failed to validate if host path is dangerous: %w", err)
}
if isDangerous {
- return errors.Errorf("chowning host path %q is not allowed. You can manually `chown -R %d:%d %s`", path, uid, gid, path)
+ return fmt.Errorf("chowning host path %q is not allowed. You can manually `chown -R %d:%d %s`", path, uid, gid, path)
}
// Chown host path
@@ -42,13 +41,13 @@ func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error {
return nil
})
if err != nil {
- return errors.Wrap(err, "failed to chown recursively host path")
+ return fmt.Errorf("failed to chown recursively host path: %w", err)
}
} else {
// Get host path info
f, err := os.Lstat(path)
if err != nil {
- return errors.Wrap(err, "failed to get host path information")
+ return fmt.Errorf("failed to get host path information: %w", err)
}
// Get current ownership
@@ -57,7 +56,7 @@ func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error {
if uid != currentUID || gid != currentGID {
if err := os.Lchown(path, uid, gid); err != nil {
- return errors.Wrap(err, "failed to chown host path")
+ return fmt.Errorf("failed to chown host path: %w", err)
}
}
}
diff --git a/vendor/github.com/containers/common/pkg/chown/chown_windows.go b/vendor/github.com/containers/common/pkg/chown/chown_windows.go
index 0c4b8e1b5..8f3bba7ef 100644
--- a/vendor/github.com/containers/common/pkg/chown/chown_windows.go
+++ b/vendor/github.com/containers/common/pkg/chown/chown_windows.go
@@ -1,7 +1,7 @@
package chown
import (
- "github.com/pkg/errors"
+ "errors"
)
// ChangeHostPathOwnership changes the uid and gid ownership of a directory or file within the host.
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index e3d19ee88..3d90268cd 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -532,6 +532,11 @@ type NetworkConfig struct {
// NetworkConfigDir is where network configuration files are stored.
NetworkConfigDir string `toml:"network_config_dir,omitempty"`
+
+ // DNSBindPort is the port that should be used by the dns forwarding daemon
+ // for netavark rootful bridges with dns enabled. This can be necessary
+ // when other dns forwarders run on the machine. 53 is used if unset.
+ DNSBindPort uint16 `toml:"dns_bind_port,omitempty,omitzero"`
}
type SubnetPool struct {
@@ -694,7 +699,7 @@ func addConfigs(dirPath string, configs []string) ([]string, error) {
}
},
)
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
err = nil
}
sort.Strings(newConfigs)
@@ -1152,7 +1157,7 @@ func ReadCustomConfig() (*Config, error) {
return nil, err
}
} else {
- if !os.IsNotExist(err) {
+ if !errors.Is(err, os.ErrNotExist) {
return nil, err
}
}
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 8fd951c4a..d1ac7c0e8 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -325,6 +325,13 @@ default_sysctls = [
#
#network_config_dir = "/etc/cni/net.d/"
+# Port used by the dns forwarding daemon when netavark runs in rootful
+# bridge mode with dns enabled.
+# Setting an alternate port is useful when other dns services need to
+# run on the machine.
+#
+#dns_bind_port = 53
+
[engine]
# Index to the active service
#
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index f381818f1..161a9c8d6 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -230,6 +230,7 @@ func DefaultConfig() (*Config, error) {
DefaultNetwork: "podman",
DefaultSubnet: DefaultSubnet,
DefaultSubnetPools: DefaultSubnetPools,
+ DNSBindPort: 0,
CNIPluginDirs: DefaultCNIPluginDirs,
},
Engine: *defaultEngineConfig,
diff --git a/vendor/github.com/containers/common/pkg/config/pull_policy.go b/vendor/github.com/containers/common/pkg/config/pull_policy.go
index c85227fe4..be6030fdb 100644
--- a/vendor/github.com/containers/common/pkg/config/pull_policy.go
+++ b/vendor/github.com/containers/common/pkg/config/pull_policy.go
@@ -15,18 +15,18 @@ import (
type PullPolicy int
const (
- // Always pull the image.
+ // Always pull the image and throw an error if the pull fails.
PullPolicyAlways PullPolicy = iota
// Pull the image only if it could not be found in the local containers
- // storage.
+ // storage. Throw an error if no image could be found and the pull fails.
PullPolicyMissing
// Never pull the image but use the one from the local containers
- // storage.
+ // storage. Throw an error if no image could be found.
PullPolicyNever
- // Pull if the image on the registry is new than the one in the local
+ // Pull if the image on the registry is newer than the one in the local
// containers storage. An image is considered to be newer when the
// digests are different. Comparing the time stamps is prone to
- // errors.
+ // errors. Pull errors are suppressed if a local image was found.
PullPolicyNewer
// Ideally this should be the first `iota` but backwards compatibility
diff --git a/vendor/github.com/containers/common/pkg/filters/filters.go b/vendor/github.com/containers/common/pkg/filters/filters.go
index 53650efc9..dee66107f 100644
--- a/vendor/github.com/containers/common/pkg/filters/filters.go
+++ b/vendor/github.com/containers/common/pkg/filters/filters.go
@@ -8,14 +8,13 @@ import (
"time"
"github.com/containers/common/pkg/timetype"
- "github.com/pkg/errors"
)
// ComputeUntilTimestamp extracts until timestamp from filters
func ComputeUntilTimestamp(filterValues []string) (time.Time, error) {
invalid := time.Time{}
if len(filterValues) != 1 {
- return invalid, errors.Errorf("specify exactly one timestamp for until")
+ return invalid, fmt.Errorf("specify exactly one timestamp for until")
}
ts, err := timetype.GetTimestamp(filterValues[0], time.Now())
if err != nil {
diff --git a/vendor/github.com/containers/common/pkg/hooks/0.1.0/hook.go b/vendor/github.com/containers/common/pkg/hooks/0.1.0/hook.go
new file mode 100644
index 000000000..88ff5990f
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/hooks/0.1.0/hook.go
@@ -0,0 +1,88 @@
+// Package hook is the 0.1.0 hook configuration structure.
+package hook
+
+import (
+ "encoding/json"
+ "errors"
+ "strings"
+
+ current "github.com/containers/common/pkg/hooks/1.0.0"
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Version is the hook configuration version defined in this package.
+const Version = "0.1.0"
+
+// Hook is the hook configuration structure.
+type Hook struct {
+ Hook *string `json:"hook"`
+ Arguments []string `json:"arguments,omitempty"`
+
+ // https://github.com/cri-o/cri-o/pull/1235
+ Stages []string `json:"stages"`
+ Stage []string `json:"stage"`
+
+ Cmds []string `json:"cmds,omitempty"`
+ Cmd []string `json:"cmd,omitempty"`
+
+ Annotations []string `json:"annotations,omitempty"`
+ Annotation []string `json:"annotation,omitempty"`
+
+ HasBindMounts *bool `json:"hasbindmounts,omitempty"`
+}
+
+func Read(content []byte) (hook *current.Hook, err error) {
+ var raw Hook
+
+ if err = json.Unmarshal(content, &raw); err != nil {
+ return nil, err
+ }
+
+ if raw.Hook == nil {
+ return nil, errors.New("missing required property: hook")
+ }
+
+ if raw.Stages == nil {
+ raw.Stages = raw.Stage
+ } else if raw.Stage != nil {
+ return nil, errors.New("cannot set both 'stage' and 'stages'")
+ }
+ if raw.Stages == nil {
+ return nil, errors.New("missing required property: stages")
+ }
+
+ if raw.Cmds == nil {
+ raw.Cmds = raw.Cmd
+ } else if raw.Cmd != nil {
+ return nil, errors.New("cannot set both 'cmd' and 'cmds'")
+ }
+
+ if raw.Annotations == nil {
+ raw.Annotations = raw.Annotation
+ } else if raw.Annotation != nil {
+ return nil, errors.New("cannot set both 'annotation' and 'annotations'")
+ }
+
+ hook = &current.Hook{
+ Version: current.Version,
+ Hook: rspec.Hook{
+ Path: *raw.Hook,
+ },
+ When: current.When{
+ Commands: raw.Cmds,
+ HasBindMounts: raw.HasBindMounts,
+ Or: true,
+ },
+ Stages: raw.Stages,
+ }
+ if raw.Arguments != nil {
+ hook.Hook.Args = append([]string{*raw.Hook}, raw.Arguments...)
+ }
+ if raw.Annotations != nil {
+ hook.When.Annotations = map[string]string{
+ ".*": strings.Join(raw.Annotations, "|"),
+ }
+ }
+
+ return hook, nil
+}
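
For reference, a hedged sketch of what the new 0.1.0 reader accepts; the hook path and patterns below are invented examples. `Read` folds the legacy singular `stage`/`cmd` spellings into their plural forms and upgrades the result to the 1.0.0 structure with `When.Or` set:

```go
package main

import (
	"fmt"

	old "github.com/containers/common/pkg/hooks/0.1.0"
)

func main() {
	// A hypothetical 0.1.0 configuration using the legacy singular
	// "stage" and "cmd" keys.
	content := []byte(`{
		"hook": "/usr/libexec/oci/hooks.d/example-hook",
		"arguments": ["--verbose"],
		"stage": ["prestart"],
		"cmd": ["init$", "systemd$"]
	}`)

	hook, err := old.Read(content)
	if err != nil {
		panic(err)
	}
	fmt.Println(hook.Version)       // 1.0.0
	fmt.Println(hook.Hook.Path)     // /usr/libexec/oci/hooks.d/example-hook
	fmt.Println(hook.Hook.Args)     // [/usr/libexec/oci/hooks.d/example-hook --verbose]
	fmt.Println(hook.When.Commands) // [init$ systemd$]
	fmt.Println(hook.Stages)        // [prestart]
}
```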
diff --git a/vendor/github.com/containers/common/pkg/hooks/1.0.0/hook.go b/vendor/github.com/containers/common/pkg/hooks/1.0.0/hook.go
new file mode 100644
index 000000000..71f940a64
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/hooks/1.0.0/hook.go
@@ -0,0 +1,89 @@
+// Package hook is the 1.0.0 hook configuration structure.
+package hook
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "regexp"
+
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Version is the hook configuration version defined in this package.
+const Version = "1.0.0"
+
+// Hook is the hook configuration structure.
+type Hook struct {
+ Version string `json:"version"`
+ Hook rspec.Hook `json:"hook"`
+ When When `json:"when"`
+ Stages []string `json:"stages"`
+}
+
+// Read reads hook JSON bytes, verifies them, and returns the hook configuration.
+func Read(content []byte) (hook *Hook, err error) {
+ if err = json.Unmarshal(content, &hook); err != nil {
+ return nil, err
+ }
+ return hook, nil
+}
+
+// Validate performs load-time hook validation.
+func (hook *Hook) Validate(extensionStages []string) (err error) {
+ if hook == nil {
+ return errors.New("nil hook")
+ }
+
+ if hook.Version != Version {
+ return fmt.Errorf("unexpected hook version %q (expecting %v)", hook.Version, Version)
+ }
+
+ if hook.Hook.Path == "" {
+ return errors.New("missing required property: hook.path")
+ }
+
+ if _, err := os.Stat(hook.Hook.Path); err != nil {
+ return err
+ }
+
+ for key, value := range hook.When.Annotations {
+ if _, err = regexp.Compile(key); err != nil {
+ return fmt.Errorf("invalid annotation key %q: %w", key, err)
+ }
+ if _, err = regexp.Compile(value); err != nil {
+ return fmt.Errorf("invalid annotation value %q: %w", value, err)
+ }
+ }
+
+ for _, command := range hook.When.Commands {
+ if _, err = regexp.Compile(command); err != nil {
+ return fmt.Errorf("invalid command %q: %w", command, err)
+ }
+ }
+
+ if hook.Stages == nil {
+ return errors.New("missing required property: stages")
+ }
+
+ validStages := map[string]bool{
+ "createContainer": true,
+ "createRuntime": true,
+ "prestart": true,
+ "poststart": true,
+ "poststop": true,
+ "startContainer": true,
+ }
+ for _, stage := range extensionStages {
+ validStages[stage] = true
+ }
+
+ for _, stage := range hook.Stages {
+ if !validStages[stage] {
+ return fmt.Errorf("unknown stage %q", stage)
+ }
+ }
+
+ return nil
+}
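
A matching sketch for the 1.0.0 format; the path and patterns are illustrative, and since `Validate` stats `hook.path`, the binary must exist on the machine running this (here `/bin/true`):

```go
package main

import (
	"fmt"

	current "github.com/containers/common/pkg/hooks/1.0.0"
)

func main() {
	content := []byte(`{
		"version": "1.0.0",
		"hook": {"path": "/bin/true"},
		"when": {"annotations": {"^example\\.hook$": "^enabled$"}},
		"stages": ["prestart", "poststop"]
	}`)

	hook, err := current.Read(content)
	if err != nil {
		panic(err)
	}
	// Validate checks the version string, that hook.path exists,
	// that the annotation and command regexps compile, and that
	// every requested stage is known.
	if err := hook.Validate(nil); err != nil {
		panic(err)
	}
	fmt.Println(hook.Stages) // [prestart poststop]
}
```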
diff --git a/vendor/github.com/containers/common/pkg/hooks/1.0.0/when.go b/vendor/github.com/containers/common/pkg/hooks/1.0.0/when.go
new file mode 100644
index 000000000..a1351890f
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/hooks/1.0.0/when.go
@@ -0,0 +1,96 @@
+package hook
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// When holds hook-injection conditions.
+type When struct {
+ Always *bool `json:"always,omitempty"`
+ Annotations map[string]string `json:"annotations,omitempty"`
+ Commands []string `json:"commands,omitempty"`
+ HasBindMounts *bool `json:"hasBindMounts,omitempty"`
+
+ // Or enables any-of matching.
+ //
+ // Deprecated: this property is for backwards-compatibility with
+ // 0.1.0 hooks. It will be removed when we drop support for them.
+ Or bool `json:"-"`
+}
+
+// Match returns true if the given conditions match the configuration.
+func (when *When) Match(config *rspec.Spec, annotations map[string]string, hasBindMounts bool) (match bool, err error) {
+ matches := 0
+
+ if when.Always != nil {
+ if *when.Always {
+ if when.Or {
+ return true, nil
+ }
+ matches++
+ } else if !when.Or {
+ return false, nil
+ }
+ }
+
+ if when.HasBindMounts != nil {
+ if *when.HasBindMounts && hasBindMounts {
+ if when.Or {
+ return true, nil
+ }
+ matches++
+ } else if !when.Or {
+ return false, nil
+ }
+ }
+
+ for keyPattern, valuePattern := range when.Annotations {
+ match := false
+ for key, value := range annotations {
+ match, err = regexp.MatchString(keyPattern, key)
+ if err != nil {
+ return false, fmt.Errorf("annotation key: %w", err)
+ }
+ if match {
+ match, err = regexp.MatchString(valuePattern, value)
+ if err != nil {
+ return false, fmt.Errorf("annotation value: %w", err)
+ }
+ if match {
+ break
+ }
+ }
+ }
+ if match {
+ if when.Or {
+ return true, nil
+ }
+ matches++
+ } else if !when.Or {
+ return false, nil
+ }
+ }
+
+ if config.Process != nil && len(when.Commands) > 0 {
+ if len(config.Process.Args) == 0 {
+ return false, errors.New("process.args must have at least one entry")
+ }
+ command := config.Process.Args[0]
+ for _, cmdPattern := range when.Commands {
+ match, err := regexp.MatchString(cmdPattern, command)
+ if err != nil {
+ return false, fmt.Errorf("command: %w", err)
+ }
+ if match {
+ return true, nil
+ }
+ }
+ return false, nil
+ }
+
+ return matches > 0, nil
+}
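
A short illustration of the matching rules above, under the same assumptions: with `Or` unset every specified condition must hold, while the deprecated `Or: true` mode (used for converted 0.1.0 hooks) accepts the first condition that matches:

```go
package main

import (
	"fmt"

	hook "github.com/containers/common/pkg/hooks/1.0.0"
	rspec "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	wantBindMounts := true
	when := hook.When{
		HasBindMounts: &wantBindMounts,
		Commands:      []string{"^init$"},
		Or:            true, // any single matching condition is enough
	}

	config := &rspec.Spec{
		Process: &rspec.Process{Args: []string{"/bin/sh"}},
	}

	// The command pattern does not match /bin/sh, but the container
	// has bind mounts, and with Or set that alone selects the hook.
	match, err := when.Match(config, nil, true)
	fmt.Println(match, err) // true <nil>
}
```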
diff --git a/vendor/github.com/containers/common/pkg/hooks/README.md b/vendor/github.com/containers/common/pkg/hooks/README.md
new file mode 100644
index 000000000..f6a03a775
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/hooks/README.md
@@ -0,0 +1,22 @@
+# OCI Hooks Configuration
+
+For POSIX platforms, the [OCI runtime configuration][runtime-spec] supports [hooks][spec-hooks] for configuring custom actions related to the life cycle of the container.
+Hooks are enabled by editing the OCI runtime configuration before the OCI runtime (e.g. [`runc`][runc]) is invoked.
+CRI-O and `podman create` generate the OCI configuration for you, and this documentation describes how developers can configure them to inject their intended hooks.
+
+One problem with hooks is that the runtime stalls execution of the container until the hooks have run, and stalls completion of the container until all hooks complete.
+This can cause performance issues.
+Many hooks also just check whether certain configuration is set and then exit early without doing anything.
+For example, the [oci-systemd-hook][] only acts if the command is `init` or `systemd`; otherwise it just exits.
+This means that if we automatically enabled all hooks, every container would have to execute `oci-systemd-hook`, even if it does not run systemd inside the container.
+Performance would also suffer if we executed each hook at each stage ([pre-start][], [post-start][], and [post-stop][]).
+
+The hooks configuration is documented in [`oci-hooks.5`](docs/oci-hooks.5.md).
+
+[oci-systemd-hook]: https://github.com/projectatomic/oci-systemd-hook
+[post-start]: https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#poststart
+[post-stop]: https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#poststop
+[pre-start]: https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#prestart
+[runc]: https://github.com/opencontainers/runc
+[runtime-spec]: https://github.com/opencontainers/runtime-spec/blob/v1.0.1/spec.md
+[spec-hooks]: https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
diff --git a/vendor/github.com/containers/common/pkg/hooks/exec/exec.go b/vendor/github.com/containers/common/pkg/hooks/exec/exec.go
new file mode 100644
index 000000000..bc639245f
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/hooks/exec/exec.go
@@ -0,0 +1,69 @@
+// Package exec provides utilities for executing Open Container Initiative runtime hooks.
+package exec
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ osexec "os/exec"
+ "time"
+
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+)
+
+// DefaultPostKillTimeout is the recommended default post-kill timeout.
+const DefaultPostKillTimeout = time.Duration(10) * time.Second
+
+// Run executes the hook and waits for it to complete or for the
+// context or hook-specified timeout to expire.
+func Run(ctx context.Context, hook *rspec.Hook, state []byte, stdout io.Writer, stderr io.Writer, postKillTimeout time.Duration) (hookErr, err error) {
+ cmd := osexec.Cmd{
+ Path: hook.Path,
+ Args: hook.Args,
+ Env: hook.Env,
+ Stdin: bytes.NewReader(state),
+ Stdout: stdout,
+ Stderr: stderr,
+ }
+ if cmd.Env == nil {
+ cmd.Env = []string{}
+ }
+
+ if hook.Timeout != nil {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, time.Duration(*hook.Timeout)*time.Second)
+ defer cancel()
+ }
+
+ err = cmd.Start()
+ if err != nil {
+ return err, err
+ }
+ exit := make(chan error, 1)
+ go func() {
+ err := cmd.Wait()
+ if err != nil {
+ err = fmt.Errorf("executing %v: %w", cmd.Args, err)
+ }
+ exit <- err
+ }()
+
+ select {
+ case err = <-exit:
+ return err, err
+ case <-ctx.Done():
+ if err := cmd.Process.Kill(); err != nil {
+ logrus.Errorf("Failed to kill pid %v", cmd.Process)
+ }
+ timer := time.NewTimer(postKillTimeout)
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ err = fmt.Errorf("failed to reap process within %s of the kill signal", postKillTimeout)
+ case err = <-exit:
+ }
+ return err, ctx.Err()
+ }
+}
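
A usage sketch for `Run`, with `/bin/cat` standing in for a real hook binary: the hook receives the container state JSON on stdin, and the optional `Timeout` is enforced through `context.WithTimeout` as shown above:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/common/pkg/hooks/exec"
	rspec "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	timeout := 2 // seconds
	hook := rspec.Hook{
		Path:    "/bin/cat", // echoes the state JSON back to stdout
		Args:    []string{"cat"},
		Timeout: &timeout,
	}

	state := []byte(`{"ociVersion":"1.0.2","id":"example","status":"created"}`)
	hookErr, err := exec.Run(context.Background(), &hook, state,
		os.Stdout, os.Stderr, exec.DefaultPostKillTimeout)
	fmt.Println(hookErr, err) // <nil> <nil> on success
}
```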
diff --git a/vendor/github.com/containers/common/pkg/hooks/exec/runtimeconfigfilter.go b/vendor/github.com/containers/common/pkg/hooks/exec/runtimeconfigfilter.go
new file mode 100644
index 000000000..72d4b8979
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/hooks/exec/runtimeconfigfilter.go
@@ -0,0 +1,72 @@
+package exec
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/davecgh/go-spew/spew"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pmezard/go-difflib/difflib"
+ "github.com/sirupsen/logrus"
+)
+
+var spewConfig = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+}
+
+// RuntimeConfigFilter calls a series of hooks. But instead of
+// passing container state on their standard input,
+// RuntimeConfigFilter passes the proposed runtime configuration (and
+// reads back a possibly-altered form from their standard output).
+func RuntimeConfigFilter(ctx context.Context, hooks []spec.Hook, config *spec.Spec, postKillTimeout time.Duration) (hookErr, err error) {
+ data, err := json.Marshal(config)
+ if err != nil {
+ return nil, err
+ }
+ for i, hook := range hooks {
+ hook := hook
+ var stdout bytes.Buffer
+ hookErr, err = Run(ctx, &hook, data, &stdout, nil, postKillTimeout)
+ if err != nil {
+ return hookErr, err
+ }
+
+ data = stdout.Bytes()
+ var newConfig spec.Spec
+ err = json.Unmarshal(data, &newConfig)
+ if err != nil {
+ logrus.Debugf("invalid JSON from config-filter hook %d:\n%s", i, string(data))
+ return nil, fmt.Errorf("unmarshal output from config-filter hook %d: %w", i, err)
+ }
+
+ if !reflect.DeepEqual(config, &newConfig) {
+ oldConfig := spewConfig.Sdump(config)
+ newConfig := spewConfig.Sdump(&newConfig)
+ diff, err := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
+ A: difflib.SplitLines(oldConfig),
+ B: difflib.SplitLines(newConfig),
+ FromFile: "Old",
+ FromDate: "",
+ ToFile: "New",
+ ToDate: "",
+ Context: 1,
+ })
+ if err == nil {
+ logrus.Debugf("precreate hook %d made configuration changes:\n%s", i, diff)
+ } else {
+ logrus.Warnf("Precreate hook %d made configuration changes, but we could not compute a diff: %v", i, err)
+ }
+ }
+
+ *config = newConfig
+ }
+
+ return nil, nil
+}
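
And a sketch of the filter variant; `/bin/cat` acts as the identity filter here, so the proposed configuration round-trips unchanged and no diff is logged:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/common/pkg/hooks/exec"
	spec "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// An identity filter: it writes the proposed configuration back
	// to stdout unchanged.
	filters := []spec.Hook{{Path: "/bin/cat", Args: []string{"cat"}}}

	config := &spec.Spec{Version: "1.0.2", Hostname: "example"}
	hookErr, err := exec.RuntimeConfigFilter(context.Background(),
		filters, config, exec.DefaultPostKillTimeout)
	fmt.Println(config.Hostname, hookErr, err) // example <nil> <nil>
}
```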
diff --git a/vendor/github.com/containers/common/pkg/hooks/hooks.go b/vendor/github.com/containers/common/pkg/hooks/hooks.go
new file mode 100644
index 000000000..6d3747e55
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/hooks/hooks.go
@@ -0,0 +1,146 @@
+// Package hooks implements hook configuration and handling for CRI-O and libpod.
+package hooks
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+
+ current "github.com/containers/common/pkg/hooks/1.0.0"
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+)
+
+// Version is the current hook configuration version.
+const Version = current.Version
+
+const (
+ // DefaultDir is the default directory containing system hook configuration files.
+ DefaultDir = "/usr/share/containers/oci/hooks.d"
+
+ // OverrideDir is the directory for hook configuration files overriding the default entries.
+ OverrideDir = "/etc/containers/oci/hooks.d"
+)
+
+// Manager provides an opaque interface for managing CRI-O hooks.
+type Manager struct {
+ hooks map[string]*current.Hook
+ directories []string
+ extensionStages []string
+ lock sync.Mutex
+}
+
+type namedHook struct {
+ name string
+ hook *current.Hook
+}
+
+// New creates a new hook manager. Directories are ordered by
+// increasing preference (hook configurations in later directories
+// override configurations with the same filename from earlier
+// directories).
+//
+// extensionStages allows callers to add additional stages beyond
+// those specified in the OCI Runtime Specification and to control
+// OCI-defined stages instead of delegating to the OCI runtime. See
+// Hooks() for more information.
+func New(ctx context.Context, directories []string, extensionStages []string) (manager *Manager, err error) {
+ manager = &Manager{
+ hooks: map[string]*current.Hook{},
+ directories: directories,
+ extensionStages: extensionStages,
+ }
+
+ for _, dir := range directories {
+ err = ReadDir(dir, manager.extensionStages, manager.hooks)
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return nil, err
+ }
+ }
+
+ return manager, nil
+}
+
+// namedHooks returns the hook entries along with their configuration-file names.
+func (m *Manager) namedHooks() (hooks []*namedHook) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ hooks = make([]*namedHook, len(m.hooks))
+ i := 0
+ for name, hook := range m.hooks {
+ hooks[i] = &namedHook{
+ name: name,
+ hook: hook,
+ }
+ i++
+ }
+
+ return hooks
+}
+
+// Hooks injects OCI runtime hooks for a given container configuration.
+//
+// If extensionStages was set when initializing the Manager,
+// matching hooks requesting those stages will be returned in
+// extensionStageHooks. This takes precedence over their inclusion in
+// the OCI configuration. For example:
+//
+// manager, err := New(ctx, []string{DefaultDir}, []string{"poststop"})
+// extensionStageHooks, err := manager.Hooks(config, annotations, hasBindMounts)
+//
+// will have any matching post-stop hooks in extensionStageHooks and
+// will not insert them into config.Hooks.Poststop.
+func (m *Manager) Hooks(config *rspec.Spec, annotations map[string]string, hasBindMounts bool) (extensionStageHooks map[string][]rspec.Hook, err error) {
+ hooks := m.namedHooks()
+ sort.Slice(hooks, func(i, j int) bool { return strings.ToLower(hooks[i].name) < strings.ToLower(hooks[j].name) })
+ localStages := map[string]bool{} // stages destined for extensionStageHooks
+ for _, stage := range m.extensionStages {
+ localStages[stage] = true
+ }
+ for _, namedHook := range hooks {
+ match, err := namedHook.hook.When.Match(config, annotations, hasBindMounts)
+ if err != nil {
+ return extensionStageHooks, fmt.Errorf("matching hook %q: %w", namedHook.name, err)
+ }
+ if match {
+ logrus.Debugf("hook %s matched; adding to stages %v", namedHook.name, namedHook.hook.Stages)
+ if config.Hooks == nil {
+ config.Hooks = &rspec.Hooks{}
+ }
+ for _, stage := range namedHook.hook.Stages {
+ if _, ok := localStages[stage]; ok {
+ if extensionStageHooks == nil {
+ extensionStageHooks = map[string][]rspec.Hook{}
+ }
+ extensionStageHooks[stage] = append(extensionStageHooks[stage], namedHook.hook.Hook)
+ } else {
+ switch stage {
+ case "createContainer":
+ config.Hooks.CreateContainer = append(config.Hooks.CreateContainer, namedHook.hook.Hook)
+ case "createRuntime":
+ config.Hooks.CreateRuntime = append(config.Hooks.CreateRuntime, namedHook.hook.Hook)
+ case "prestart":
+ config.Hooks.Prestart = append(config.Hooks.Prestart, namedHook.hook.Hook)
+ case "poststart":
+ config.Hooks.Poststart = append(config.Hooks.Poststart, namedHook.hook.Hook)
+ case "poststop":
+ config.Hooks.Poststop = append(config.Hooks.Poststop, namedHook.hook.Hook)
+ case "startContainer":
+ config.Hooks.StartContainer = append(config.Hooks.StartContainer, namedHook.hook.Hook)
+ default:
+ return extensionStageHooks, fmt.Errorf("hook %q: unknown stage %q", namedHook.name, stage)
+ }
+ }
+ }
+ } else {
+ logrus.Debugf("hook %s did not match", namedHook.name)
+ }
+ }
+
+ return extensionStageHooks, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/hooks/monitor.go b/vendor/github.com/containers/common/pkg/hooks/monitor.go
new file mode 100644
index 000000000..e9facf0d0
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/hooks/monitor.go
@@ -0,0 +1,66 @@
+package hooks
+
+import (
+ "context"
+
+ current "github.com/containers/common/pkg/hooks/1.0.0"
+ "github.com/fsnotify/fsnotify"
+ "github.com/sirupsen/logrus"
+)
+
+// Monitor dynamically monitors hook directories for additions,
+// updates, and removals.
+//
+// This function writes two values to the sync channel: the first is
+// written after the watchers are established and the second when this
+// function exits. The expected usage is:
+//
+// ctx, cancel := context.WithCancel(context.Background())
+// sync := make(chan error, 2)
+// go m.Monitor(ctx, sync)
+// err := <-sync // block until writers are established
+// if err != nil {
+// return err // failed to establish watchers
+// }
+// // do stuff
+// cancel()
+// err = <-sync // block until monitor finishes
+func (m *Manager) Monitor(ctx context.Context, sync chan<- error) {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ sync <- err
+ return
+ }
+ defer watcher.Close()
+
+ for _, dir := range m.directories {
+ err = watcher.Add(dir)
+ if err != nil {
+ logrus.Errorf("Failed to watch %q for hooks", dir)
+ sync <- err
+ return
+ }
+ logrus.Debugf("monitoring %q for hooks", dir)
+ }
+
+ sync <- nil
+
+ for {
+ select {
+ case event := <-watcher.Events:
+ m.hooks = make(map[string]*current.Hook)
+ for _, dir := range m.directories {
+ err = ReadDir(dir, m.extensionStages, m.hooks)
+ if err != nil {
+ logrus.Errorf("Failed loading hooks for %s: %v", event.Name, err)
+ }
+ }
+ case <-ctx.Done():
+ err = ctx.Err()
+ logrus.Debugf("hook monitoring canceled: %v", err)
+ sync <- err
+ close(sync)
+ return
+ }
+ }
+}
diff --git a/vendor/github.com/containers/common/pkg/hooks/read.go b/vendor/github.com/containers/common/pkg/hooks/read.go
new file mode 100644
index 000000000..25cf7be99
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/hooks/read.go
@@ -0,0 +1,101 @@
+// Package hooks implements CRI-O's hook handling.
+package hooks
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ old "github.com/containers/common/pkg/hooks/0.1.0"
+ current "github.com/containers/common/pkg/hooks/1.0.0"
+ "github.com/sirupsen/logrus"
+)
+
+type reader func(content []byte) (*current.Hook, error)
+
+var (
+ // ErrNoJSONSuffix represents hook-add attempts where the filename
+ // does not end in '.json'.
+ ErrNoJSONSuffix = errors.New("hook filename does not end in '.json'")
+
+ // Readers registers per-version hook readers.
+ Readers = map[string]reader{}
+)
+
+// Read reads a hook JSON file, verifies it, and returns the hook configuration.
+func Read(path string, extensionStages []string) (*current.Hook, error) {
+ if !strings.HasSuffix(path, ".json") {
+ return nil, ErrNoJSONSuffix
+ }
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ hook, err := read(content)
+ if err != nil {
+ return nil, fmt.Errorf("parsing hook %q: %w", path, err)
+ }
+ err = hook.Validate(extensionStages)
+ return hook, err
+}
+
+func read(content []byte) (hook *current.Hook, err error) {
+ var ver version
+ if err := json.Unmarshal(content, &ver); err != nil {
+ return nil, fmt.Errorf("version check: %w", err)
+ }
+ reader, ok := Readers[ver.Version]
+ if !ok {
+ return nil, fmt.Errorf("unrecognized hook version: %q", ver.Version)
+ }
+
+ hook, err = reader(content)
+ if err != nil {
+ return hook, fmt.Errorf("%s: %v", ver.Version, err)
+ }
+ return hook, err
+}
+
+// ReadDir reads hook JSON files from a directory into the given map,
+// clobbering any previous entries with the same filenames.
+func ReadDir(path string, extensionStages []string, hooks map[string]*current.Hook) error {
+ logrus.Debugf("reading hooks from %s", path)
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ return err
+ }
+ res := err
+ for _, file := range files {
+ filePath := filepath.Join(path, file.Name())
+ hook, err := Read(filePath, extensionStages)
+ if err != nil {
+ if err == ErrNoJSONSuffix {
+ continue
+ }
+ if errors.Is(err, os.ErrNotExist) {
+ if err2, ok := err.(*os.PathError); ok && err2.Path == filePath {
+ continue
+ }
+ }
+ if res == nil {
+ res = err
+ } else {
+ res = fmt.Errorf("%v: %w", err, res)
+ }
+ continue
+ }
+ hooks[file.Name()] = hook
+ logrus.Debugf("added hook %s", filePath)
+ }
+ return res
+}
+
+func init() {
+ Readers[current.Version] = current.Read
+ Readers[old.Version] = old.Read
+ Readers[""] = old.Read
+}
diff --git a/vendor/github.com/containers/common/pkg/hooks/version.go b/vendor/github.com/containers/common/pkg/hooks/version.go
new file mode 100644
index 000000000..637d8e2f4
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/hooks/version.go
@@ -0,0 +1,6 @@
+package hooks
+
+// version is a structure for checking the version of a hook configuration.
+type version struct {
+ Version string `json:"version"`
+}
diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go
index 75ffac06c..d2279ab0e 100644
--- a/vendor/github.com/containers/common/pkg/manifests/manifests.go
+++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go
@@ -2,13 +2,14 @@ package manifests
import (
"encoding/json"
+ "errors"
+ "fmt"
"os"
"github.com/containers/image/v5/manifest"
digest "github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// List is a generic interface for manipulating a manifest list or an image
@@ -73,7 +74,7 @@ func Create() List {
// AddInstance adds an entry for the specified manifest digest, with assorted
// additional information specified in parameters, to the list or index.
func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features, annotations []string) error {
- if err := l.Remove(manifestDigest); err != nil && !os.IsNotExist(errors.Cause(err)) {
+ if err := l.Remove(manifestDigest); err != nil && !errors.Is(err, os.ErrNotExist) {
return err
}
@@ -113,7 +114,7 @@ func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, man
// Remove filters out any instances in the list which match the specified digest.
func (l *list) Remove(instanceDigest digest.Digest) error {
- err := errors.Wrapf(os.ErrNotExist, "no instance matching digest %q found in manifest list", instanceDigest)
+ err := fmt.Errorf("no instance matching digest %q found in manifest list: %w", instanceDigest, os.ErrNotExist)
newDockerManifests := make([]manifest.Schema2ManifestDescriptor, 0, len(l.docker.Manifests))
for i := range l.docker.Manifests {
if l.docker.Manifests[i].Digest != instanceDigest {
@@ -141,7 +142,7 @@ func (l *list) findDocker(instanceDigest digest.Digest) (*manifest.Schema2Manife
return &l.docker.Manifests[i], nil
}
}
- return nil, errors.Wrapf(ErrDigestNotFound, "no Docker manifest matching digest %q was found in list", instanceDigest.String())
+ return nil, fmt.Errorf("no Docker manifest matching digest %q was found in list: %w", instanceDigest.String(), ErrDigestNotFound)
}
func (l *list) findOCIv1(instanceDigest digest.Digest) (*v1.Descriptor, error) {
@@ -150,7 +151,7 @@ func (l *list) findOCIv1(instanceDigest digest.Digest) (*v1.Descriptor, error) {
return &l.oci.Manifests[i], nil
}
}
- return nil, errors.Wrapf(ErrDigestNotFound, "no OCI manifest matching digest %q was found in list", instanceDigest.String())
+ return nil, fmt.Errorf("no OCI manifest matching digest %q was found in list: %w", instanceDigest.String(), ErrDigestNotFound)
}
// SetURLs sets the URLs where the manifest might also be found.
@@ -370,10 +371,10 @@ func FromBlob(manifestBytes []byte) (List, error) {
}
switch manifestType {
default:
- return nil, errors.Wrapf(ErrManifestTypeNotSupported, "unable to load manifest list: unsupported format %q", manifestType)
+ return nil, fmt.Errorf("unable to load manifest list: unsupported format %q: %w", manifestType, ErrManifestTypeNotSupported)
case manifest.DockerV2ListMediaType:
if err := json.Unmarshal(manifestBytes, &list.docker); err != nil {
- return nil, errors.Wrapf(err, "unable to parse Docker manifest list from image")
+ return nil, fmt.Errorf("unable to parse Docker manifest list from image: %w", err)
}
for _, m := range list.docker.Manifests {
list.oci.Manifests = append(list.oci.Manifests, v1.Descriptor{
@@ -391,7 +392,7 @@ func FromBlob(manifestBytes []byte) (List, error) {
}
case v1.MediaTypeImageIndex:
if err := json.Unmarshal(manifestBytes, &list.oci); err != nil {
- return nil, errors.Wrapf(err, "unable to parse OCIv1 manifest list")
+ return nil, fmt.Errorf("unable to parse OCIv1 manifest list: %w", err)
}
for _, m := range list.oci.Manifests {
platform := m.Platform
@@ -451,26 +452,26 @@ func (l *list) Serialize(mimeType string) ([]byte, error) {
if l.preferOCI() {
res, err = json.Marshal(&l.oci)
if err != nil {
- return nil, errors.Wrapf(err, "error marshalling OCI image index")
+ return nil, fmt.Errorf("error marshalling OCI image index: %w", err)
}
} else {
res, err = json.Marshal(&l.docker)
if err != nil {
- return nil, errors.Wrapf(err, "error marshalling Docker manifest list")
+ return nil, fmt.Errorf("error marshalling Docker manifest list: %w", err)
}
}
case v1.MediaTypeImageIndex:
res, err = json.Marshal(&l.oci)
if err != nil {
- return nil, errors.Wrapf(err, "error marshalling OCI image index")
+ return nil, fmt.Errorf("error marshalling OCI image index: %w", err)
}
case manifest.DockerV2ListMediaType:
res, err = json.Marshal(&l.docker)
if err != nil {
- return nil, errors.Wrapf(err, "error marshalling Docker manifest list")
+ return nil, fmt.Errorf("error marshalling Docker manifest list: %w", err)
}
default:
- return nil, errors.Wrapf(ErrManifestTypeNotSupported, "serializing list to type %q not implemented", mimeType)
+ return nil, fmt.Errorf("serializing list to type %q not implemented: %w", mimeType, ErrManifestTypeNotSupported)
}
return res, nil
}
diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go
index 43b783e0c..15e932129 100644
--- a/vendor/github.com/containers/common/pkg/parse/parse.go
+++ b/vendor/github.com/containers/common/pkg/parse/parse.go
@@ -4,12 +4,12 @@ package parse
// user input and is shared either amongst container engine subcommands
import (
+ "errors"
+ "fmt"
"os"
"path"
"path/filepath"
"strings"
-
- "github.com/pkg/errors"
)
// ValidateVolumeOpts validates a volume's options
@@ -21,7 +21,7 @@ func ValidateVolumeOpts(options []string) ([]string, error) {
if strings.Contains(opt, "upperdir") {
foundUpperDir++
if foundUpperDir > 1 {
- return nil, errors.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", "))
+ return nil, fmt.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", "))
}
finalOpts = append(finalOpts, opt)
continue
@@ -29,7 +29,7 @@ func ValidateVolumeOpts(options []string) ([]string, error) {
if strings.Contains(opt, "workdir") {
foundWorkDir++
if foundWorkDir > 1 {
- return nil, errors.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", "))
+ return nil, fmt.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", "))
}
finalOpts = append(finalOpts, opt)
continue
@@ -43,42 +43,42 @@ func ValidateVolumeOpts(options []string) ([]string, error) {
case "noexec", "exec":
foundExec++
if foundExec > 1 {
- return nil, errors.Errorf("invalid options %q, can only specify 1 'noexec' or 'exec' option", strings.Join(options, ", "))
+ return nil, fmt.Errorf("invalid options %q, can only specify 1 'noexec' or 'exec' option", strings.Join(options, ", "))
}
case "nodev", "dev":
foundDev++
if foundDev > 1 {
- return nil, errors.Errorf("invalid options %q, can only specify 1 'nodev' or 'dev' option", strings.Join(options, ", "))
+ return nil, fmt.Errorf("invalid options %q, can only specify 1 'nodev' or 'dev' option", strings.Join(options, ", "))
}
case "nosuid", "suid":
foundSuid++
if foundSuid > 1 {
- return nil, errors.Errorf("invalid options %q, can only specify 1 'nosuid' or 'suid' option", strings.Join(options, ", "))
+ return nil, fmt.Errorf("invalid options %q, can only specify 1 'nosuid' or 'suid' option", strings.Join(options, ", "))
}
case "rw", "ro":
foundRWRO++
if foundRWRO > 1 {
- return nil, errors.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", strings.Join(options, ", "))
+ return nil, fmt.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", strings.Join(options, ", "))
}
case "z", "Z", "O":
foundLabelChange++
if foundLabelChange > 1 {
- return nil, errors.Errorf("invalid options %q, can only specify 1 'z', 'Z', or 'O' option", strings.Join(options, ", "))
+ return nil, fmt.Errorf("invalid options %q, can only specify 1 'z', 'Z', or 'O' option", strings.Join(options, ", "))
}
case "U":
foundChown++
if foundChown > 1 {
- return nil, errors.Errorf("invalid options %q, can only specify 1 'U' option", strings.Join(options, ", "))
+ return nil, fmt.Errorf("invalid options %q, can only specify 1 'U' option", strings.Join(options, ", "))
}
case "private", "rprivate", "shared", "rshared", "slave", "rslave", "unbindable", "runbindable":
foundRootPropagation++
if foundRootPropagation > 1 {
- return nil, errors.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private', '[r]slave' or '[r]unbindable' option", strings.Join(options, ", "))
+ return nil, fmt.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private', '[r]slave' or '[r]unbindable' option", strings.Join(options, ", "))
}
case "bind", "rbind":
bindType++
if bindType > 1 {
- return nil, errors.Errorf("invalid options %q, can only specify 1 '[r]bind' option", strings.Join(options, ", "))
+ return nil, fmt.Errorf("invalid options %q, can only specify 1 '[r]bind' option", strings.Join(options, ", "))
}
case "cached", "delegated":
// The discarded ops are OS X specific volume options
@@ -91,10 +91,10 @@ func ValidateVolumeOpts(options []string) ([]string, error) {
case "copy", "nocopy":
foundCopy++
if foundCopy > 1 {
- return nil, errors.Errorf("invalid options %q, can only specify 1 'copy' or 'nocopy' option", strings.Join(options, ", "))
+ return nil, fmt.Errorf("invalid options %q, can only specify 1 'copy' or 'nocopy' option", strings.Join(options, ", "))
}
default:
- return nil, errors.Errorf("invalid option type %q", opt)
+ return nil, fmt.Errorf("invalid option type %q", opt)
}
finalOpts = append(finalOpts, opt)
}
@@ -113,7 +113,7 @@ func Device(device string) (src, dest, permissions string, err error) {
switch len(arr) {
case 3:
if !isValidDeviceMode(arr[2]) {
- return "", "", "", errors.Errorf("invalid device mode: %s", arr[2])
+ return "", "", "", fmt.Errorf("invalid device mode: %s", arr[2])
}
permissions = arr[2]
fallthrough
@@ -122,7 +122,7 @@ func Device(device string) (src, dest, permissions string, err error) {
permissions = arr[1]
} else {
if arr[1] == "" || arr[1][0] != '/' {
- return "", "", "", errors.Errorf("invalid device mode: %s", arr[1])
+ return "", "", "", fmt.Errorf("invalid device mode: %s", arr[1])
}
dest = arr[1]
}
@@ -134,7 +134,7 @@ func Device(device string) (src, dest, permissions string, err error) {
}
fallthrough
default:
- return "", "", "", errors.Errorf("invalid device specification: %s", device)
+ return "", "", "", fmt.Errorf("invalid device specification: %s", device)
}
if dest == "" {
@@ -184,7 +184,7 @@ func ValidateVolumeCtrDir(ctrDir string) error {
return errors.New("container directory cannot be empty")
}
if !path.IsAbs(ctrDir) {
- return errors.Errorf("invalid container path %q, must be an absolute path", ctrDir)
+ return fmt.Errorf("invalid container path %q, must be an absolute path", ctrDir)
}
return nil
}
diff --git a/vendor/github.com/containers/common/pkg/parse/parse_unix.go b/vendor/github.com/containers/common/pkg/parse/parse_unix.go
index 6fb52a014..8b3599229 100644
--- a/vendor/github.com/containers/common/pkg/parse/parse_unix.go
+++ b/vendor/github.com/containers/common/pkg/parse/parse_unix.go
@@ -4,12 +4,12 @@
package parse
import (
+ "fmt"
"os"
"path/filepath"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runc/libcontainer/devices"
- "github.com/pkg/errors"
)
func DeviceFromPath(device string) ([]devices.Device, error) {
@@ -18,7 +18,7 @@ func DeviceFromPath(device string) ([]devices.Device, error) {
return nil, err
}
if unshare.IsRootless() && src != dst {
- return nil, errors.Errorf("Renaming device %s to %s is not supported in rootless containers", src, dst)
+ return nil, fmt.Errorf("Renaming device %s to %s is not supported in rootless containers", src, dst)
}
srcInfo, err := os.Stat(src)
if err != nil {
@@ -29,7 +29,7 @@ func DeviceFromPath(device string) ([]devices.Device, error) {
devs := make([]devices.Device, 0, 1)
dev, err := devices.DeviceFromPath(src, permissions)
if err != nil {
- return nil, errors.Wrapf(err, "%s is not a valid device", src)
+ return nil, fmt.Errorf("%s is not a valid device: %w", src, err)
}
dev.Path = dst
devs = append(devs, *dev)
@@ -39,7 +39,7 @@ func DeviceFromPath(device string) ([]devices.Device, error) {
// If source device is a directory
srcDevices, err := devices.GetDevices(src)
if err != nil {
- return nil, errors.Wrapf(err, "error getting source devices from directory %s", src)
+ return nil, fmt.Errorf("error getting source devices from directory %s: %w", src, err)
}
devs := make([]devices.Device, 0, len(srcDevices))
for _, d := range srcDevices {
diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go
index 321131f69..a838c706a 100644
--- a/vendor/github.com/containers/common/pkg/retry/retry.go
+++ b/vendor/github.com/containers/common/pkg/retry/retry.go
@@ -12,7 +12,6 @@ import (
"github.com/docker/distribution/registry/api/errcode"
errcodev2 "github.com/docker/distribution/registry/api/v2"
"github.com/hashicorp/go-multierror"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -51,8 +50,6 @@ func IfNecessary(ctx context.Context, operation func() error, options *Options)
}
func isRetryable(err error) bool {
- err = errors.Cause(err)
-
switch err {
case nil:
return false
diff --git a/vendor/github.com/containers/common/pkg/seccomp/conversion.go b/vendor/github.com/containers/common/pkg/seccomp/conversion.go
index cd599f0f3..01fe11cd5 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/conversion.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/conversion.go
@@ -7,7 +7,6 @@ import (
"fmt"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
var (
@@ -108,7 +107,7 @@ func specToSeccomp(spec *specs.LinuxSeccomp) (*Seccomp, error) {
for _, arch := range spec.Architectures {
newArch, err := specArchToSeccompArch(arch)
if err != nil {
- return nil, errors.Wrap(err, "convert spec arch")
+ return nil, fmt.Errorf("convert spec arch: %w", err)
}
res.Architectures = append(res.Architectures, newArch)
}
@@ -116,7 +115,7 @@ func specToSeccomp(spec *specs.LinuxSeccomp) (*Seccomp, error) {
// Convert default action
newDefaultAction, err := specActionToSeccompAction(spec.DefaultAction)
if err != nil {
- return nil, errors.Wrap(err, "convert default action")
+ return nil, fmt.Errorf("convert default action: %w", err)
}
res.DefaultAction = newDefaultAction
res.DefaultErrnoRet = spec.DefaultErrnoRet
@@ -125,7 +124,7 @@ func specToSeccomp(spec *specs.LinuxSeccomp) (*Seccomp, error) {
for _, call := range spec.Syscalls {
newAction, err := specActionToSeccompAction(call.Action)
if err != nil {
- return nil, errors.Wrap(err, "convert action")
+ return nil, fmt.Errorf("convert action: %w", err)
}
for _, name := range call.Names {
@@ -140,7 +139,7 @@ func specToSeccomp(spec *specs.LinuxSeccomp) (*Seccomp, error) {
for _, arg := range call.Args {
newOp, err := specOperatorToSeccompOperator(arg.Op)
if err != nil {
- return nil, errors.Wrap(err, "convert operator")
+ return nil, fmt.Errorf("convert operator: %w", err)
}
newArg := Arg{
@@ -164,7 +163,7 @@ func specArchToLibseccompArch(arch specs.Arch) (string, error) {
if res, ok := specArchToLibseccompArchMap[arch]; ok {
return res, nil
}
- return "", errors.Errorf(
+ return "", fmt.Errorf(
"architecture %q is not valid for libseccomp", arch,
)
}
@@ -174,7 +173,7 @@ func specArchToSeccompArch(arch specs.Arch) (Arch, error) {
if res, ok := specArchToSeccompArchMap[arch]; ok {
return res, nil
}
- return "", errors.Errorf("architecture %q is not valid", arch)
+ return "", fmt.Errorf("architecture %q is not valid", arch)
}
// specActionToSeccompAction converts a spec action into a seccomp one.
@@ -182,7 +181,7 @@ func specActionToSeccompAction(action specs.LinuxSeccompAction) (Action, error)
if res, ok := specActionToSeccompActionMap[action]; ok {
return res, nil
}
- return "", errors.Errorf(
+ return "", fmt.Errorf(
"spec action %q is not valid internal action", action,
)
}
@@ -192,7 +191,7 @@ func specOperatorToSeccompOperator(operator specs.LinuxSeccompOperator) (Operato
if op, ok := specOperatorToSeccompOperatorMap[operator]; ok {
return op, nil
}
- return "", errors.Errorf(
+ return "", fmt.Errorf(
"spec operator %q is not a valid internal operator", operator,
)
}
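
The conversion.go hunks above are the mechanical core of this migration, and the same mapping repeats through the rest of the diff: errors.Wrap(err, "msg") becomes fmt.Errorf("msg: %w", err); errors.Wrapf(err, "msg %s", x) becomes fmt.Errorf("msg %s: %w", x, err) with the wrapped error moved to the final argument; errors.Errorf becomes a plain fmt.Errorf. A small self-contained sketch showing that %w keeps the chain inspectable:

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
    )

    func main() {
        base := fs.ErrNotExist
        // pkg/errors style would be: errors.Wrapf(base, "loading profile %s", "default")
        wrapped := fmt.Errorf("loading profile %s: %w", "default", base)
        fmt.Println(wrapped)                  // loading profile default: file does not exist
        fmt.Println(errors.Is(wrapped, base)) // true: %w preserves the wrap chain
    }
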
diff --git a/vendor/github.com/containers/common/pkg/seccomp/filter.go b/vendor/github.com/containers/common/pkg/seccomp/filter.go
index 7f1783efb..72c95734b 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/filter.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/filter.go
@@ -7,8 +7,10 @@
package seccomp
import (
+ "errors"
+ "fmt"
+
specs "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
libseccomp "github.com/seccomp/libseccomp-golang"
"golang.org/x/sys/unix"
)
@@ -39,39 +41,39 @@ func BuildFilter(spec *specs.LinuxSeccomp) (*libseccomp.ScmpFilter, error) {
profile, err := specToSeccomp(spec)
if err != nil {
- return nil, errors.Wrap(err, "convert spec to seccomp profile")
+ return nil, fmt.Errorf("convert spec to seccomp profile: %w", err)
}
defaultAction, err := toAction(profile.DefaultAction, profile.DefaultErrnoRet)
if err != nil {
- return nil, errors.Wrapf(err, "convert default action %s", profile.DefaultAction)
+ return nil, fmt.Errorf("convert default action %s: %w", profile.DefaultAction, err)
}
filter, err := libseccomp.NewFilter(defaultAction)
if err != nil {
- return nil, errors.Wrapf(err, "create filter for default action %s", defaultAction)
+ return nil, fmt.Errorf("create filter for default action %s: %w", defaultAction, err)
}
// Add extra architectures
for _, arch := range spec.Architectures {
libseccompArch, err := specArchToLibseccompArch(arch)
if err != nil {
- return nil, errors.Wrap(err, "convert spec arch")
+ return nil, fmt.Errorf("convert spec arch: %w", err)
}
scmpArch, err := libseccomp.GetArchFromString(libseccompArch)
if err != nil {
- return nil, errors.Wrapf(err, "validate Seccomp architecture %s", arch)
+ return nil, fmt.Errorf("validate Seccomp architecture %s: %w", arch, err)
}
if err := filter.AddArch(scmpArch); err != nil {
- return nil, errors.Wrap(err, "add architecture to seccomp filter")
+ return nil, fmt.Errorf("add architecture to seccomp filter: %w", err)
}
}
// Unset no new privs bit
if err := filter.SetNoNewPrivsBit(false); err != nil {
- return nil, errors.Wrap(err, "set no new privileges flag")
+ return nil, fmt.Errorf("set no new privileges flag: %w", err)
}
// Add a rule for each syscall
@@ -81,7 +83,7 @@ func BuildFilter(spec *specs.LinuxSeccomp) (*libseccomp.ScmpFilter, error) {
}
if err = matchSyscall(filter, call); err != nil {
- return nil, errors.Wrap(err, "filter matches syscall")
+ return nil, fmt.Errorf("filter matches syscall: %w", err)
}
}
@@ -107,13 +109,13 @@ func matchSyscall(filter *libseccomp.ScmpFilter, call *Syscall) error {
// Convert the call's action to the libseccomp equivalent
callAct, err := toAction(call.Action, call.ErrnoRet)
if err != nil {
- return errors.Wrapf(err, "convert action %s", call.Action)
+ return fmt.Errorf("convert action %s: %w", call.Action, err)
}
// Unconditional match - just add the rule
if len(call.Args) == 0 {
if err = filter.AddRule(callNum, callAct); err != nil {
- return errors.Wrapf(err, "add seccomp filter rule for syscall %s", call.Name)
+ return fmt.Errorf("add seccomp filter rule for syscall %s: %w", call.Name, err)
}
} else {
// Linux system calls can have at most 6 arguments
@@ -127,7 +129,7 @@ func matchSyscall(filter *libseccomp.ScmpFilter, call *Syscall) error {
for _, cond := range call.Args {
newCond, err := toCondition(cond)
if err != nil {
- return errors.Wrapf(err, "create seccomp syscall condition for syscall %s", call.Name)
+ return fmt.Errorf("create seccomp syscall condition for syscall %s: %w", call.Name, err)
}
argCounts[cond.Index]++
@@ -150,13 +152,13 @@ func matchSyscall(filter *libseccomp.ScmpFilter, call *Syscall) error {
condArr := []libseccomp.ScmpCondition{cond}
if err = filter.AddRuleConditional(callNum, callAct, condArr); err != nil {
- return errors.Wrapf(err, "add seccomp rule for syscall %s", call.Name)
+ return fmt.Errorf("add seccomp rule for syscall %s: %w", call.Name, err)
}
}
} else if err = filter.AddRuleConditional(callNum, callAct, conditions); err != nil {
// No conditions share the same argument
// Use the new, proper behavior
- return errors.Wrapf(err, "add seccomp rule for syscall %s", call.Name)
+ return fmt.Errorf("add seccomp rule for syscall %s: %w", call.Name, err)
}
}
@@ -189,7 +191,7 @@ func toAction(act Action, errnoRet *uint) (libseccomp.ScmpAction, error) {
case ActLog:
return libseccomp.ActLog, nil
default:
- return libseccomp.ActInvalid, errors.Errorf("invalid action %s", act)
+ return libseccomp.ActInvalid, fmt.Errorf("invalid action %s", act)
}
}
@@ -202,14 +204,14 @@ func toCondition(arg *Arg) (cond libseccomp.ScmpCondition, err error) {
op, err := toCompareOp(arg.Op)
if err != nil {
- return cond, errors.Wrap(err, "convert compare operator")
+ return cond, fmt.Errorf("convert compare operator: %w", err)
}
condition, err := libseccomp.MakeCondition(
arg.Index, op, arg.Value, arg.ValueTwo,
)
if err != nil {
- return cond, errors.Wrap(err, "make condition")
+ return cond, fmt.Errorf("make condition: %w", err)
}
return condition, nil
@@ -234,6 +236,6 @@ func toCompareOp(op Operator) (libseccomp.ScmpCompareOp, error) {
case OpMaskedEqual:
return libseccomp.CompareMaskedEqual, nil
default:
- return libseccomp.CompareInvalid, errors.Errorf("invalid operator %s", op)
+ return libseccomp.CompareInvalid, fmt.Errorf("invalid operator %s", op)
}
}
diff --git a/vendor/github.com/containers/common/pkg/seccomp/validate.go b/vendor/github.com/containers/common/pkg/seccomp/validate.go
index 669ab04a2..80558c1f0 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/validate.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/validate.go
@@ -5,8 +5,7 @@ package seccomp
import (
"encoding/json"
-
- "github.com/pkg/errors"
+ "fmt"
)
// ValidateProfile does a basic validation for the provided seccomp profile
@@ -14,16 +13,16 @@ import (
func ValidateProfile(content string) error {
profile := &Seccomp{}
if err := json.Unmarshal([]byte(content), &profile); err != nil {
- return errors.Wrap(err, "decoding seccomp profile")
+ return fmt.Errorf("decoding seccomp profile: %w", err)
}
spec, err := setupSeccomp(profile, nil)
if err != nil {
- return errors.Wrap(err, "create seccomp spec")
+ return fmt.Errorf("create seccomp spec: %w", err)
}
if _, err := BuildFilter(spec); err != nil {
- return errors.Wrap(err, "build seccomp filter")
+ return fmt.Errorf("build seccomp filter: %w", err)
}
return nil
diff --git a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go
index 6b92714d0..f1b7ef3f2 100644
--- a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go
+++ b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go
@@ -2,13 +2,14 @@ package filedriver
import (
"encoding/json"
+ "errors"
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"github.com/containers/storage/pkg/lockfile"
- "github.com/pkg/errors"
)
// secretsDataFile is the file where secrets data/payload will be stored
@@ -75,7 +76,7 @@ func (d *Driver) Lookup(id string) ([]byte, error) {
if data, ok := secretData[id]; ok {
return data, nil
}
- return nil, errors.Wrapf(errNoSecretData, "%s", id)
+ return nil, fmt.Errorf("%s: %w", id, errNoSecretData)
}
// Store stores the bytes associated with an ID. An error is returned if the ID already exists
@@ -88,7 +89,7 @@ func (d *Driver) Store(id string, data []byte) error {
return err
}
if _, ok := secretData[id]; ok {
- return errors.Wrapf(errSecretIDExists, "%s", id)
+ return fmt.Errorf("%s: %w", id, errSecretIDExists)
}
secretData[id] = data
marshalled, err := json.MarshalIndent(secretData, "", " ")
@@ -113,7 +114,7 @@ func (d *Driver) Delete(id string) error {
if _, ok := secretData[id]; ok {
delete(secretData, id)
} else {
- return errors.Wrap(errNoSecretData, id)
+ return fmt.Errorf("%s: %w", id, errNoSecretData)
}
marshalled, err := json.MarshalIndent(secretData, "", " ")
if err != nil {
@@ -131,7 +132,7 @@ func (d *Driver) getAllData() (map[string][]byte, error) {
// check if the db file exists
_, err := os.Stat(d.secretsDataFilePath)
if err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
// the file will be created later on a store()
return make(map[string][]byte), nil
}
diff --git a/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go b/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go
index 50967b7cf..7a658c02d 100644
--- a/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go
+++ b/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go
@@ -3,6 +3,8 @@ package passdriver
import (
"bytes"
"context"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"os"
@@ -10,8 +12,6 @@ import (
"path/filepath"
"sort"
"strings"
-
- "github.com/pkg/errors"
)
var (
@@ -108,7 +108,7 @@ func NewDriver(opts map[string]string) (*Driver, error) {
func (d *Driver) List() (secrets []string, err error) {
files, err := ioutil.ReadDir(d.Root)
if err != nil {
- return nil, errors.Wrap(err, "failed to read secret directory")
+ return nil, fmt.Errorf("failed to read secret directory: %w", err)
}
for _, f := range files {
fileName := f.Name()
@@ -127,10 +127,10 @@ func (d *Driver) Lookup(id string) ([]byte, error) {
return nil, err
}
if err := d.gpg(context.TODO(), nil, out, "--decrypt", key); err != nil {
- return nil, errors.Wrapf(errNoSecretData, id)
+ return nil, fmt.Errorf("%s: %w", id, errNoSecretData)
}
if out.Len() == 0 {
- return nil, errors.Wrapf(errNoSecretData, id)
+ return nil, fmt.Errorf("%s: %w", id, errNoSecretData)
}
return out.Bytes(), nil
}
@@ -138,7 +138,7 @@ func (d *Driver) Lookup(id string) ([]byte, error) {
// Store saves the bytes associated with an ID. An error is returned if the ID already exists
func (d *Driver) Store(id string, data []byte) error {
if _, err := d.Lookup(id); err == nil {
- return errors.Wrap(errSecretIDExists, id)
+ return fmt.Errorf("%s: %w", id, errSecretIDExists)
}
in := bytes.NewReader(data)
key, err := d.getPath(id)
@@ -155,7 +155,7 @@ func (d *Driver) Delete(id string) error {
return err
}
if err := os.Remove(key); err != nil {
- return errors.Wrap(errNoSecretData, id)
+ return fmt.Errorf("%s: %w", id, errNoSecretData)
}
return nil
}
diff --git a/vendor/github.com/containers/common/pkg/secrets/secrets.go b/vendor/github.com/containers/common/pkg/secrets/secrets.go
index e45995b2e..e23db1152 100644
--- a/vendor/github.com/containers/common/pkg/secrets/secrets.go
+++ b/vendor/github.com/containers/common/pkg/secrets/secrets.go
@@ -1,6 +1,8 @@
package secrets
import (
+ "errors"
+ "fmt"
"os"
"path/filepath"
"regexp"
@@ -12,7 +14,6 @@ import (
"github.com/containers/common/pkg/secrets/shelldriver"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/stringid"
- "github.com/pkg/errors"
)
// maxSecretSize is the max size for secret data - 512kB
@@ -105,7 +106,7 @@ func NewManager(rootPath string) (*SecretsManager, error) {
manager := new(SecretsManager)
if !filepath.IsAbs(rootPath) {
- return nil, errors.Wrapf(errInvalidPath, "path must be absolute: %s", rootPath)
+ return nil, fmt.Errorf("path must be absolute: %s: %w", rootPath, errInvalidPath)
}
// the lockfile functions require that the rootPath dir is executable
if err := os.MkdirAll(rootPath, 0o700); err != nil {
@@ -146,7 +147,7 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, driv
return "", err
}
if exist {
- return "", errors.Wrapf(errSecretNameInUse, name)
+ return "", fmt.Errorf("%s: %w", name, errSecretNameInUse)
}
secr := new(Secret)
@@ -158,7 +159,7 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, driv
newID = newID[0:secretIDLength]
_, err := s.lookupSecret(newID)
if err != nil {
- if errors.Cause(err) == ErrNoSuchSecret {
+ if errors.Is(err, ErrNoSuchSecret) {
secr.ID = newID
break
} else {
@@ -178,12 +179,12 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, driv
}
err = driver.Store(secr.ID, data)
if err != nil {
- return "", errors.Wrapf(err, "error creating secret %s", name)
+ return "", fmt.Errorf("error creating secret %s: %w", name, err)
}
err = s.store(secr)
if err != nil {
- return "", errors.Wrapf(err, "error creating secret %s", name)
+ return "", fmt.Errorf("error creating secret %s: %w", name, err)
}
return secr.ID, nil
@@ -213,12 +214,12 @@ func (s *SecretsManager) Delete(nameOrID string) (string, error) {
err = driver.Delete(secretID)
if err != nil {
- return "", errors.Wrapf(err, "error deleting secret %s", nameOrID)
+ return "", fmt.Errorf("error deleting secret %s: %w", nameOrID, err)
}
err = s.delete(secretID)
if err != nil {
- return "", errors.Wrapf(err, "error deleting secret %s", nameOrID)
+ return "", fmt.Errorf("error deleting secret %s: %w", nameOrID, err)
}
return secretID, nil
}
@@ -271,7 +272,7 @@ func (s *SecretsManager) LookupSecretData(nameOrID string) (*Secret, []byte, err
// validateSecretName checks if the secret name is valid.
func validateSecretName(name string) error {
if !secretNameRegexp.MatchString(name) || len(name) > 64 || strings.HasSuffix(name, "-") || strings.HasSuffix(name, ".") {
- return errors.Wrapf(errInvalidSecretName, "only 64 [a-zA-Z0-9-_.] characters allowed, and the start and end character must be [a-zA-Z0-9]: %s", name)
+ return fmt.Errorf("only 64 [a-zA-Z0-9-_.] characters allowed, and the start and end character must be [a-zA-Z0-9]: %s: %w", name, errInvalidSecretName)
}
return nil
}
@@ -283,7 +284,7 @@ func getDriver(name string, opts map[string]string) (SecretsDriver, error) {
if path, ok := opts["path"]; ok {
return filedriver.NewDriver(path)
}
- return nil, errors.Wrap(errInvalidDriverOpt, "need path for filedriver")
+ return nil, fmt.Errorf("need path for filedriver: %w", errInvalidDriverOpt)
case "pass":
return passdriver.NewDriver(opts)
case "shell":
diff --git a/vendor/github.com/containers/common/pkg/secrets/secretsdb.go b/vendor/github.com/containers/common/pkg/secrets/secretsdb.go
index 8d21aa754..91b0b7eb0 100644
--- a/vendor/github.com/containers/common/pkg/secrets/secretsdb.go
+++ b/vendor/github.com/containers/common/pkg/secrets/secretsdb.go
@@ -2,12 +2,12 @@ package secrets
import (
"encoding/json"
+ "errors"
+ "fmt"
"io/ioutil"
"os"
"strings"
"time"
-
- "github.com/pkg/errors"
)
type db struct {
@@ -70,21 +70,21 @@ func (s *SecretsManager) getNameAndID(nameOrID string) (name, id string, err err
name, id, err = s.getExactNameAndID(nameOrID)
if err == nil {
return name, id, nil
- } else if errors.Cause(err) != ErrNoSuchSecret {
+ } else if !errors.Is(err, ErrNoSuchSecret) {
return "", "", err
}
// ID prefix may have been given, iterate through all IDs.
// ID and partial ID have a max length of 25, so we return if it's greater than that.
if len(nameOrID) > secretIDLength {
- return "", "", errors.Wrapf(ErrNoSuchSecret, "no secret with name or id %q", nameOrID)
+ return "", "", fmt.Errorf("no secret with name or id %q: %w", nameOrID, ErrNoSuchSecret)
}
exists := false
var foundID, foundName string
for id, name := range s.db.IDToName {
if strings.HasPrefix(id, nameOrID) {
if exists {
- return "", "", errors.Wrapf(errAmbiguous, "more than one result secret with prefix %s", nameOrID)
+ return "", "", fmt.Errorf("more than one result secret with prefix %s: %w", nameOrID, errAmbiguous)
}
exists = true
foundID = id
@@ -95,7 +95,7 @@ func (s *SecretsManager) getNameAndID(nameOrID string) (name, id string, err err
if exists {
return foundName, foundID, nil
}
- return "", "", errors.Wrapf(ErrNoSuchSecret, "no secret with name or id %q", nameOrID)
+ return "", "", fmt.Errorf("no secret with name or id %q: %w", nameOrID, ErrNoSuchSecret)
}
// getExactNameAndID takes a secret's name or ID and returns both its name and full ID.
@@ -114,7 +114,7 @@ func (s *SecretsManager) getExactNameAndID(nameOrID string) (name, id string, er
return name, id, nil
}
- return "", "", errors.Wrapf(ErrNoSuchSecret, "no secret with name or id %q", nameOrID)
+ return "", "", fmt.Errorf("no secret with name or id %q: %w", nameOrID, ErrNoSuchSecret)
}
// exactSecretExists checks if the secret exists, given a name or ID
@@ -122,7 +122,7 @@ func (s *SecretsManager) getExactNameAndID(nameOrID string) (name, id string, er
func (s *SecretsManager) exactSecretExists(nameOrID string) (bool, error) {
_, _, err := s.getExactNameAndID(nameOrID)
if err != nil {
- if errors.Cause(err) == ErrNoSuchSecret {
+ if errors.Is(err, ErrNoSuchSecret) {
return false, nil
}
return false, err
@@ -157,7 +157,7 @@ func (s *SecretsManager) lookupSecret(nameOrID string) (*Secret, error) {
return &secret, nil
}
- return nil, errors.Wrapf(ErrNoSuchSecret, "no secret with name or id %q", nameOrID)
+ return nil, fmt.Errorf("no secret with name or id %q: %w", nameOrID, ErrNoSuchSecret)
}
// Store creates a new secret in the secrets database.
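
The secrets files above all follow one sentinel pattern: annotate and wrap a package-level sentinel with %w at the failure site, and replace errors.Cause(err) == Sentinel comparisons with errors.Is at the call site. A minimal sketch with illustrative names (not the vendored identifiers):

    package main

    import (
        "errors"
        "fmt"
    )

    var errNoSuchSecret = errors.New("no such secret")

    func lookup(nameOrID string) error {
        // failure site: annotate and wrap the sentinel
        return fmt.Errorf("no secret with name or id %q: %w", nameOrID, errNoSuchSecret)
    }

    func main() {
        err := lookup("abc123")
        // call site: errors.Cause(err) == errNoSuchSecret becomes:
        fmt.Println(errors.Is(err, errNoSuchSecret)) // true
    }
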
diff --git a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go
index 8eac200f7..87dc317a9 100644
--- a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go
+++ b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go
@@ -3,13 +3,12 @@ package shelldriver
import (
"bytes"
"context"
+ "errors"
"fmt"
"os"
"os/exec"
"sort"
"strings"
-
- "github.com/pkg/errors"
)
var (
@@ -126,7 +125,7 @@ func (d *Driver) Lookup(id string) ([]byte, error) {
err := cmd.Run()
if err != nil {
- return nil, errors.Wrap(errNoSecretData, id)
+ return nil, fmt.Errorf("%s: %w", id, errNoSecretData)
}
return buf.Bytes(), nil
}
@@ -163,7 +162,7 @@ func (d *Driver) Delete(id string) error {
err := cmd.Run()
if err != nil {
- return errors.Wrap(errNoSecretData, id)
+ return fmt.Errorf("%s: %w", id, errNoSecretData)
}
return nil
diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go
index b111ae7ff..ff82b5a39 100644
--- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go
+++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go
@@ -2,6 +2,8 @@ package subscriptions
import (
"bufio"
+ "errors"
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -11,7 +13,6 @@ import (
"github.com/containers/storage/pkg/idtools"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -51,7 +52,7 @@ func readAll(root, prefix string, parentMode os.FileMode) ([]subscriptionData, e
files, err := ioutil.ReadDir(path)
if err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
return data, nil
}
@@ -63,7 +64,7 @@ func readAll(root, prefix string, parentMode os.FileMode) ([]subscriptionData, e
if err != nil {
// If the file did not exist, it might be a dangling symlink
// Ignore the error
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
continue
}
return nil, err
@@ -105,7 +106,7 @@ func getHostSubscriptionData(hostDir string, mode os.FileMode) ([]subscriptionDa
var allSubscriptions []subscriptionData
hostSubscriptions, err := readAll(hostDir, "", mode)
if err != nil {
- return nil, errors.Wrapf(err, "failed to read subscriptions from %q", hostDir)
+ return nil, fmt.Errorf("failed to read subscriptions from %q: %w", hostDir, err)
}
return append(allSubscriptions, hostSubscriptions...), nil
}
@@ -144,7 +145,7 @@ func getMountsMap(path string) (string, string, error) { //nolint
case 2:
return arr[0], arr[1], nil
}
- return "", "", errors.Errorf("unable to get host and container dir from path: %s", path)
+ return "", "", fmt.Errorf("unable to get host and container dir from path: %s", path)
}
// MountsWithUIDGID copies, adds, and mounts the subscriptions to the container root filesystem
@@ -195,7 +196,7 @@ func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string,
if err := addFIPSModeSubscription(&subscriptionMounts, containerRunDir, mountPoint, mountLabel, uid, gid); err != nil {
logrus.Errorf("Adding FIPS mode subscription to container: %v", err)
}
- case os.IsNotExist(err):
+ case errors.Is(err, os.ErrNotExist):
logrus.Debug("/etc/system-fips does not exist on host, not mounting FIPS mode subscription")
default:
logrus.Errorf("stat /etc/system-fips failed for FIPS mode subscription: %v", err)
@@ -222,7 +223,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string
// skip if the hostDirOrFile path doesn't exist
fileInfo, err := os.Stat(hostDirOrFile)
if err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
logrus.Warnf("Path %q from %q doesn't exist, skipping", hostDirOrFile, filePath)
continue
}
@@ -233,7 +234,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string
// In the event of a restart, we don't want to copy subscriptions over again, as they would already exist in ctrDirOrFileOnHost
_, err = os.Stat(ctrDirOrFileOnHost)
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
hostDirOrFile, err = resolveSymbolicLink(hostDirOrFile)
if err != nil {
@@ -247,15 +248,15 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string
switch mode := fileInfo.Mode(); {
case mode.IsDir():
if err = os.MkdirAll(ctrDirOrFileOnHost, mode.Perm()); err != nil {
- return nil, errors.Wrap(err, "making container directory")
+ return nil, fmt.Errorf("making container directory: %w", err)
}
data, err := getHostSubscriptionData(hostDirOrFile, mode.Perm())
if err != nil {
- return nil, errors.Wrap(err, "getting host subscription data")
+ return nil, fmt.Errorf("getting host subscription data: %w", err)
}
for _, s := range data {
if err := s.saveTo(ctrDirOrFileOnHost); err != nil {
- return nil, errors.Wrapf(err, "error saving data to container filesystem on host %q", ctrDirOrFileOnHost)
+ return nil, fmt.Errorf("error saving data to container filesystem on host %q: %w", ctrDirOrFileOnHost, err)
}
}
case mode.IsRegular():
@@ -268,16 +269,16 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string
return nil, err
}
if err := ioutil.WriteFile(ctrDirOrFileOnHost, s.data, s.mode); err != nil {
- return nil, errors.Wrap(err, "saving data to container filesystem")
+ return nil, fmt.Errorf("saving data to container filesystem: %w", err)
}
}
default:
- return nil, errors.Errorf("unsupported file type for: %q", hostDirOrFile)
+ return nil, fmt.Errorf("unsupported file type for: %q", hostDirOrFile)
}
err = label.Relabel(ctrDirOrFileOnHost, mountLabel, false)
if err != nil {
- return nil, errors.Wrap(err, "error applying correct labels")
+ return nil, fmt.Errorf("error applying correct labels: %w", err)
}
if uid != 0 || gid != 0 {
if err := rchown(ctrDirOrFileOnHost, uid, gid); err != nil {
@@ -311,20 +312,20 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string
func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, mountLabel string, uid, gid int) error {
subscriptionsDir := "/run/secrets"
ctrDirOnHost := filepath.Join(containerRunDir, subscriptionsDir)
- if _, err := os.Stat(ctrDirOnHost); os.IsNotExist(err) {
+ if _, err := os.Stat(ctrDirOnHost); errors.Is(err, os.ErrNotExist) {
if err = idtools.MkdirAllAs(ctrDirOnHost, 0o755, uid, gid); err != nil { //nolint
return err
}
if err = label.Relabel(ctrDirOnHost, mountLabel, false); err != nil {
- return errors.Wrapf(err, "applying correct labels on %q", ctrDirOnHost)
+ return fmt.Errorf("applying correct labels on %q: %w", ctrDirOnHost, err)
}
}
fipsFile := filepath.Join(ctrDirOnHost, "system-fips")
// In the event of a restart, it is possible for the FIPS mode file to already exist
- if _, err := os.Stat(fipsFile); os.IsNotExist(err) {
+ if _, err := os.Stat(fipsFile); errors.Is(err, os.ErrNotExist) {
file, err := os.Create(fipsFile)
if err != nil {
- return errors.Wrap(err, "creating system-fips file in container for FIPS mode")
+ return fmt.Errorf("creating system-fips file in container for FIPS mode: %w", err)
}
file.Close()
}
@@ -343,10 +344,10 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint,
destDir := "/etc/crypto-policies/back-ends"
srcOnHost := filepath.Join(mountPoint, srcBackendDir)
if _, err := os.Stat(srcOnHost); err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
return nil
}
- return errors.Wrap(err, "FIPS Backend directory")
+ return fmt.Errorf("FIPS Backend directory: %w", err)
}
if !mountExists(*mounts, destDir) {
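
subscriptions.go also converts every os.IsNotExist(err) check to errors.Is(err, os.ErrNotExist). The two agree on a raw *PathError, but only errors.Is sees through fmt.Errorf("...: %w", err) wrapping, which is what makes it the safer default once errors start being wrapped this way. A quick sketch:

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    func main() {
        _, err := os.Stat("/no/such/path")
        fmt.Println(os.IsNotExist(err), errors.Is(err, os.ErrNotExist)) // true true

        wrapped := fmt.Errorf("stat for FIPS check: %w", err)
        fmt.Println(os.IsNotExist(wrapped))             // false: no unwrapping
        fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: walks the chain
    }
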
diff --git a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go
index 196176a1c..84201c998 100644
--- a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go
+++ b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go
@@ -3,6 +3,7 @@ package supplemented
import (
"container/list"
"context"
+ "fmt"
"io"
cp "github.com/containers/image/v5/copy"
@@ -12,7 +13,6 @@ import (
"github.com/containers/image/v5/types"
multierror "github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -58,7 +58,7 @@ func Reference(ref types.ImageReference, supplemental []types.ImageReference, mu
func (s *supplementedImageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := s.NewImageSource(ctx, sys)
if err != nil {
- return nil, errors.Wrapf(err, "error building a new Image using an ImageSource")
+ return nil, fmt.Errorf("error building a new Image using an ImageSource: %w", err)
}
return image.FromSource(ctx, sys, src)
}
@@ -75,7 +75,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty
// Open the default instance for reading.
top, err := s.ImageReference.NewImageSource(ctx, sys)
if err != nil {
- return nil, errors.Wrapf(err, "error opening %q as image source", transports.ImageName(s.ImageReference))
+ return nil, fmt.Errorf("error opening %q as image source: %w", transports.ImageName(s.ImageReference), err)
}
defer func() {
@@ -105,14 +105,14 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty
// Mark this instance as being associated with this ImageSource.
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
- return errors.Wrapf(err, "error computing digest over manifest %q", string(manifestBytes))
+ return fmt.Errorf("error computing digest over manifest %q: %w", string(manifestBytes), err)
}
sources[manifestDigest] = src
// Parse the manifest as a single image.
man, err := manifest.FromBlob(manifestBytes, manifestType)
if err != nil {
- return errors.Wrapf(err, "error parsing manifest %q", string(manifestBytes))
+ return fmt.Errorf("error parsing manifest %q: %w", string(manifestBytes), err)
}
// Log the config blob's digest and the blobs of its layers as associated with this manifest.
@@ -135,14 +135,14 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty
// Mark this instance as being associated with this ImageSource.
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
- return errors.Wrapf(err, "error computing manifest digest")
+ return fmt.Errorf("error computing manifest digest: %w", err)
}
sources[manifestDigest] = src
// Parse the manifest as a list of images.
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
- return errors.Wrapf(err, "error parsing manifest blob %q as a %q", string(manifestBytes), manifestType)
+ return fmt.Errorf("error parsing manifest blob %q as a %q: %w", string(manifestBytes), manifestType, err)
}
// Figure out which of its instances we want to look at.
@@ -151,7 +151,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty
case cp.CopySystemImage:
instance, err := list.ChooseInstance(sys)
if err != nil {
- return errors.Wrapf(err, "error selecting appropriate instance from list")
+ return fmt.Errorf("error selecting appropriate instance from list: %w", err)
}
chaseInstances = []digest.Digest{instance}
case cp.CopySpecificImages:
@@ -194,14 +194,14 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty
} else {
src, err = ref.NewImageSource(ctx, sys)
if err != nil {
- return nil, errors.Wrapf(err, "error opening %q as image source", transports.ImageName(ref))
+ return nil, fmt.Errorf("error opening %q as image source: %w", transports.ImageName(ref), err)
}
}
// Read the default manifest for the image.
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error reading default manifest from image %q", transports.ImageName(ref))
+ return nil, fmt.Errorf("error reading default manifest from image %q: %w", transports.ImageName(ref), err)
}
// If this is the first image, mark it as our starting point.
@@ -223,18 +223,18 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty
// Record the digest of the ImageSource's default instance's manifest.
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
- return nil, errors.Wrapf(err, "error computing digest of manifest from image %q", transports.ImageName(ref))
+ return nil, fmt.Errorf("error computing digest of manifest from image %q: %w", transports.ImageName(ref), err)
}
sis.sourceDefaultInstances[src] = manifestDigest
// If the ImageSource's default manifest is a list, parse each of its instances.
if manifest.MIMETypeIsMultiImage(manifestType) {
if err = addMulti(manifestBytes, manifestType, src); err != nil {
- return nil, errors.Wrapf(err, "error adding multi-image %q", transports.ImageName(ref))
+ return nil, fmt.Errorf("error adding multi-image %q: %w", transports.ImageName(ref), err)
}
} else {
if err = addSingle(manifestBytes, manifestType, src); err != nil {
- return nil, errors.Wrapf(err, "error adding single image %q", transports.ImageName(ref))
+ return nil, fmt.Errorf("error adding single image %q: %w", transports.ImageName(ref), err)
}
}
}
@@ -257,22 +257,22 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty
// Read the instance's manifest.
manifestBytes, manifestType, err := manifestToRead.src.GetManifest(ctx, manifestToRead.instance)
if err != nil {
- // if errors.Cause(err) == storage.ErrImageUnknown || os.IsNotExist(errors.Cause(err)) {
+ // if errors.Is(err, storage.ErrImageUnknown) || errors.Is(err, os.ErrNotExist) {
// Trust that we either don't need it, or that it's in another reference.
// continue
// }
- return nil, errors.Wrapf(err, "error reading manifest for instance %q", manifestToRead.instance)
+ return nil, fmt.Errorf("error reading manifest for instance %q: %w", manifestToRead.instance, err)
}
if manifest.MIMETypeIsMultiImage(manifestType) {
// Add the list's contents.
if err = addMulti(manifestBytes, manifestType, manifestToRead.src); err != nil {
- return nil, errors.Wrapf(err, "error adding single image instance %q", manifestToRead.instance)
+ return nil, fmt.Errorf("error adding single image instance %q: %w", manifestToRead.instance, err)
}
} else {
// Add the single image's contents.
if err = addSingle(manifestBytes, manifestType, manifestToRead.src); err != nil {
- return nil, errors.Wrapf(err, "error adding single image instance %q", manifestToRead.instance)
+ return nil, fmt.Errorf("error adding single image instance %q: %w", manifestToRead.instance, err)
}
}
}
@@ -281,7 +281,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty
}
func (s *supplementedImageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("deletion of images not implemented")
+ return fmt.Errorf("deletion of images not implemented")
}
func (s *supplementedImageSource) Close() error {
@@ -313,17 +313,17 @@ func (s *supplementedImageSource) GetManifest(ctx context.Context, instanceDiges
}
return sourceInstance.GetManifest(ctx, requestInstanceDigest)
}
- return nil, "", errors.Wrapf(ErrDigestNotFound, "error getting manifest for digest %q", *instanceDigest)
+ return nil, "", fmt.Errorf("error getting manifest for digest %q: %w", *instanceDigest, ErrDigestNotFound)
}
func (s *supplementedImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, bic types.BlobInfoCache) (io.ReadCloser, int64, error) {
sourceInstance, ok := s.instancesByBlobDigest[blob.Digest]
if !ok {
- return nil, -1, errors.Wrapf(ErrBlobNotFound, "error blob %q in known instances", blob.Digest)
+ return nil, -1, fmt.Errorf("error blob %q in known instances: %w", blob.Digest, ErrBlobNotFound)
}
src, ok := s.sourceInstancesByInstance[sourceInstance]
if !ok {
- return nil, -1, errors.Wrapf(ErrDigestNotFound, "error getting image source for instance %q", sourceInstance)
+ return nil, -1, fmt.Errorf("error getting image source for instance %q: %w", sourceInstance, ErrDigestNotFound)
}
return src.GetBlob(ctx, blob, bic)
}
@@ -364,7 +364,7 @@ func (s *supplementedImageSource) GetSignatures(ctx context.Context, instanceDig
if src != nil {
return src.GetSignatures(ctx, requestInstanceDigest)
}
- return nil, errors.Wrapf(ErrDigestNotFound, "error finding instance for instance digest %q to read signatures", digest)
+ return nil, fmt.Errorf("error finding instance for instance digest %q to read signatures: %w", digest, ErrDigestNotFound)
}
func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
@@ -387,7 +387,7 @@ func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanc
if src != nil {
blobInfos, err := src.LayerInfosForCopy(ctx, requestInstanceDigest)
if err != nil {
- return nil, errors.Wrapf(err, "error reading layer infos for copy from instance %q", instanceDigest)
+ return nil, fmt.Errorf("error reading layer infos for copy from instance %q: %w", instanceDigest, err)
}
var manifestDigest digest.Digest
if instanceDigest != nil {
@@ -398,5 +398,5 @@ func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanc
}
return blobInfos, nil
}
- return nil, errors.Wrapf(ErrDigestNotFound, "error finding instance for instance digest %q to copy layers", errMsgDigest)
+ return nil, fmt.Errorf("error finding instance for instance digest %q to copy layers: %w", errMsgDigest, ErrDigestNotFound)
}
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go
index 6420ba274..39cc5beb0 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go
@@ -1,6 +1,8 @@
package sysinfo
import (
+ "errors"
+ "fmt"
"io/ioutil"
"os"
"path"
@@ -8,7 +10,6 @@ import (
"github.com/containers/common/pkg/cgroupv2"
"github.com/opencontainers/runc/libcontainer/cgroups"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -16,7 +17,7 @@ import (
func findCgroupMountpoints() (map[string]string, error) {
cgMounts, err := cgroups.GetCgroupMounts(false)
if err != nil {
- return nil, errors.Wrap(err, "parse cgroup information")
+ return nil, fmt.Errorf("parse cgroup information: %w", err)
}
mps := make(map[string]string)
for _, m := range cgMounts {
@@ -51,7 +52,7 @@ func New(quiet bool) *SysInfo {
sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables")
// Check if AppArmor is supported.
- if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) {
+ if _, err := os.Stat("/sys/kernel/security/apparmor"); !errors.Is(err, os.ErrNotExist) {
sysInfo.AppArmor = true
}
diff --git a/vendor/github.com/containers/common/pkg/util/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go
index 35201f932..6d7060af4 100644
--- a/vendor/github.com/containers/common/pkg/util/util_supported.go
+++ b/vendor/github.com/containers/common/pkg/util/util_supported.go
@@ -4,6 +4,7 @@
package util
import (
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -11,7 +12,6 @@ import (
"syscall"
"github.com/containers/storage/pkg/unshare"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -72,7 +72,7 @@ func GetRuntimeDir() (string, error) {
}
resolvedHome, err := filepath.EvalSymlinks(home)
if err != nil {
- rootlessRuntimeDirError = errors.Wrap(err, "cannot resolve home")
+ rootlessRuntimeDirError = fmt.Errorf("cannot resolve home: %w", err)
return
}
runtimeDir = filepath.Join(resolvedHome, "rundir")
diff --git a/vendor/github.com/containers/common/pkg/util/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go
index 1cffb21fc..1525bdc34 100644
--- a/vendor/github.com/containers/common/pkg/util/util_windows.go
+++ b/vendor/github.com/containers/common/pkg/util/util_windows.go
@@ -4,7 +4,7 @@
package util
import (
- "github.com/pkg/errors"
+ "errors"
)
// getRuntimeDir returns the runtime directory
diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go
index 020e703e8..cfac3e6d5 100644
--- a/vendor/github.com/containers/image/v5/copy/blob.go
+++ b/vendor/github.com/containers/image/v5/copy/blob.go
@@ -2,12 +2,13 @@ package copy
import (
"context"
+ "errors"
+ "fmt"
"io"
"github.com/containers/image/v5/internal/private"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -34,7 +35,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
// read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest)
if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
+ return types.BlobInfo{}, fmt.Errorf("preparing to verify blob %s: %w", srcInfo.Digest, err)
}
stream.reader = digestingReader
@@ -105,7 +106,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
}
uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "writing blob")
+ return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err)
}
uploadedInfo.Annotations = stream.info.Annotations
@@ -124,15 +125,15 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
_, err := io.Copy(io.Discard, originalLayerReader)
if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
+ return types.BlobInfo{}, fmt.Errorf("reading input blob %s: %w", srcInfo.Digest, err)
}
}
if digestingReader.validationFailed { // Coverage: This should never happen.
- return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+ return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
}
if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest {
- return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest)
+ return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest)
}
if digestingReader.validationSucceeded {
if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil {
@@ -163,8 +164,8 @@ type errorAnnotationReader struct {
// Read annotates the error happened during read
func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
n, err = r.reader.Read(b)
- if err != io.EOF {
- return n, errors.Wrapf(err, "happened during read")
+ if err != nil && err != io.EOF {
+ return n, fmt.Errorf("happened during read: %w", err)
}
return n, err
}
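
The errorAnnotationReader hunk above is more than a mechanical rewrite: errors.Wrapf(nil, ...) returned nil, so the old bare `if err != io.EOF` guard was safe, but fmt.Errorf always returns a non-nil error, hence the added `err != nil &&` condition. A sketch of the bug the new guard prevents:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // badRead reproduces the pre-fix logic with fmt.Errorf: on a successful
    // read (err == nil) it wraps nil and manufactures a phantom error.
    func badRead(r io.Reader, b []byte) (int, error) {
        n, err := r.Read(b)
        if err != io.EOF { // missing "err != nil &&"
            return n, fmt.Errorf("happened during read: %w", err)
        }
        return n, err
    }

    func main() {
        buf := make([]byte, 2)
        n, err := badRead(strings.NewReader("hi"), buf)
        fmt.Println(n, err != nil) // 2 true: a non-nil error from a clean read
    }
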
diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go
index 99305a039..ff0e7945d 100644
--- a/vendor/github.com/containers/image/v5/copy/compression.go
+++ b/vendor/github.com/containers/image/v5/copy/compression.go
@@ -1,13 +1,14 @@
package copy
import (
+ "errors"
+ "fmt"
"io"
internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -26,7 +27,7 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform
if err != nil {
- return bpDetectCompressionStepData{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
+ return bpDetectCompressionStepData{}, fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
}
stream.reader = reader
@@ -259,7 +260,7 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
case types.Decompress:
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
default:
- return errors.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
+ return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
}
}
if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index 0df595237..a318d3f82 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -3,6 +3,7 @@ package copy
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"os"
@@ -18,6 +19,7 @@ import (
"github.com/containers/image/v5/internal/imagesource"
"github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/internal/private"
+ internalsig "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/pkg/compression"
@@ -28,7 +30,6 @@ import (
encconfig "github.com/containers/ocicrypt/config"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbauerster/mpb/v7"
"golang.org/x/sync/semaphore"
@@ -121,15 +122,17 @@ type ImageListSelection int
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
type Options struct {
- RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature.
- SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
- SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`.
- SignIdentity reference.Named // Identity to use when signing, defaults to the docker reference of the destination
- ReportWriter io.Writer
- SourceCtx *types.SystemContext
- DestinationCtx *types.SystemContext
- ProgressInterval time.Duration // time to wait between reports to signal the progress channel
- Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+ RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature.
+ SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+ SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`.
+ SignBySigstorePrivateKeyFile string // If non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path.
+ SignSigstorePrivateKeyPassphrase []byte // Passphrase to use when signing with `SignBySigstorePrivateKeyFile`.
+ SignIdentity reference.Named // Identity to use when signing, defaults to the docker reference of the destination
+ ReportWriter io.Writer
+ SourceCtx *types.SystemContext
+ DestinationCtx *types.SystemContext
+ ProgressInterval time.Duration // time to wait between reports to signal the progress channel
+ Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
// Preserve digests, and fail if we cannot.
PreserveDigests bool
@@ -175,7 +178,7 @@ func validateImageListSelection(selection ImageListSelection) error {
case CopySystemImage, CopyAllImages, CopySpecificImages:
return nil
default:
- return errors.Errorf("Invalid value for options.ImageListSelection: %d", selection)
+ return fmt.Errorf("Invalid value for options.ImageListSelection: %d", selection)
}
}
@@ -204,23 +207,31 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
if err != nil {
- return nil, errors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef))
+ return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err)
}
dest := imagedestination.FromPublic(publicDest)
defer func() {
if err := dest.Close(); err != nil {
- retErr = errors.Wrapf(retErr, " (dest: %v)", err)
+ if retErr != nil {
+ retErr = fmt.Errorf(" (dest: %v): %w", err, retErr)
+ } else {
+ retErr = fmt.Errorf(" (dest: %v)", err)
+ }
}
}()
publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
if err != nil {
- return nil, errors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err)
}
rawSource := imagesource.FromPublic(publicRawSource)
defer func() {
if err := rawSource.Close(); err != nil {
- retErr = errors.Wrapf(retErr, " (src: %v)", err)
+ if retErr != nil {
+ retErr = fmt.Errorf(" (src: %v): %w", err, retErr)
+ } else {
+ retErr = fmt.Errorf(" (src: %v)", err)
+ }
}
}()
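
The deferred-Close rewrite above handles the same nil case explicitly: errors.Wrapf(retErr, ...) was a no-op when retErr was nil, while fmt.Errorf is not, so both branches must be spelled out (on Go 1.20+, errors.Join(retErr, err) would be a compact alternative). A sketch of the pattern with a hypothetical closer:

    package main

    import (
        "errors"
        "fmt"
        "io"
        "strings"
    )

    func readAll(r io.ReadCloser) (s string, retErr error) {
        defer func() {
            if err := r.Close(); err != nil {
                if retErr != nil {
                    retErr = fmt.Errorf(" (close: %v): %w", err, retErr)
                } else {
                    retErr = fmt.Errorf(" (close: %v)", err)
                }
            }
        }()
        b, err := io.ReadAll(r)
        return string(b), err
    }

    type failingCloser struct{ io.Reader }

    func (failingCloser) Close() error { return errors.New("boom") }

    func main() {
        _, err := readAll(failingCloser{strings.NewReader("ok")})
        fmt.Println(err) // (close: boom)
    }
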
@@ -277,7 +288,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
multiImage, err := isMultiImage(ctx, unparsedToplevel)
if err != nil {
- return nil, errors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err)
}
if !multiImage {
@@ -290,26 +301,26 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
// matches the current system to copy, and copy it.
mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "reading manifest for %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err)
}
manifestList, err := manifest.ListFromBlob(mfest, manifestType)
if err != nil {
- return nil, errors.Wrapf(err, "parsing primary manifest as list for %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err)
}
instanceDigest, err := manifestList.ChooseInstance(options.SourceCtx) // try to pick one that matches options.SourceCtx
if err != nil {
- return nil, errors.Wrapf(err, "choosing an image from manifest list %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err)
}
logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil {
- return nil, errors.Wrap(err, "copying system image from manifest list")
+ return nil, fmt.Errorf("copying system image from manifest list: %w", err)
}
} else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */
// If we were asked to copy multiple images and can't, that's an error.
if !supportsMultipleImages(c.dest) {
- return nil, errors.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
+ return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
}
// Copy some or all of the images.
switch options.ImageListSelection {
@@ -324,7 +335,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}
if err := c.dest.Commit(ctx, unparsedToplevel); err != nil {
- return nil, errors.Wrap(err, "committing the finished image")
+ return nil, fmt.Errorf("committing the finished image: %w", err)
}
return copiedManifest, nil
@@ -351,7 +362,7 @@ func supportsMultipleImages(dest types.ImageDestination) bool {
func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
srcManifestDigest, err := manifest.Digest(src.ManifestBlob)
if err != nil {
- return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest")
+ return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
}
destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
@@ -368,7 +379,7 @@ func compareImageDestinationManifestEqual(ctx context.Context, options *Options,
destManifestDigest, err := manifest.Digest(destManifest)
if err != nil {
- return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest")
+ return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
}
logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
@@ -386,30 +397,30 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
// Parse the list and get a copy of the original value after it's re-encoded.
manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "reading manifest list")
+ return nil, fmt.Errorf("reading manifest list: %w", err)
}
originalList, err := manifest.ListFromBlob(manifestList, manifestType)
if err != nil {
- return nil, errors.Wrapf(err, "parsing manifest list %q", string(manifestList))
+ return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err)
}
updatedList := originalList.Clone()
// Read and/or clear the set of signatures for this list.
- var sigs [][]byte
+ var sigs []internalsig.Signature
if options.RemoveSignatures {
- sigs = [][]byte{}
+ sigs = []internalsig.Signature{}
} else {
c.Printf("Getting image list signatures\n")
- s, err := c.rawSource.GetSignatures(ctx, nil)
+ s, err := c.rawSource.GetSignaturesWithFormat(ctx, nil)
if err != nil {
- return nil, errors.Wrap(err, "reading signatures")
+ return nil, fmt.Errorf("reading signatures: %w", err)
}
sigs = s
}
if len(sigs) != 0 {
c.Printf("Checking if image list destination supports signatures\n")
if err := c.dest.SupportsSignatures(ctx); err != nil {
- return nil, errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
+ return nil, fmt.Errorf("Can not copy signatures to %s: %w", transports.ImageName(c.dest.Reference()), err)
}
}
@@ -421,7 +432,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
destIsDigestedReference = true
matches, err := manifest.MatchesDigest(manifestList, digested.Digest())
if err != nil {
- return nil, errors.Wrapf(err, "computing digest of source image's manifest")
+ return nil, fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
return nil, errors.New("Digest of source image's manifest would not match destination reference")
@@ -453,11 +464,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
}
selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
if err != nil {
- return nil, errors.Wrapf(err, "determining manifest list type to write to destination")
+ return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err)
}
if selectedListType != originalList.MIMEType() {
if cannotModifyManifestListReason != "" {
- return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason)
+ return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason)
}
}
@@ -495,7 +506,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest)
updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest)
if err != nil {
- return nil, errors.Wrapf(err, "copying image %d/%d from manifest list", instancesCopied+1, imagesToCopy)
+ return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", instancesCopied+1, imagesToCopy, err)
}
instancesCopied++
// Record the result of a possible conversion here.
@@ -509,7 +520,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
// Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
if err = updatedList.UpdateInstances(updates); err != nil {
- return nil, errors.Wrapf(err, "updating manifest list")
+ return nil, fmt.Errorf("updating manifest list: %w", err)
}
// Iterate through supported list types, preferred format first.
@@ -524,7 +535,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
if thisListType != updatedList.MIMEType() {
attemptedList, err = updatedList.ConvertToMIMEType(thisListType)
if err != nil {
- return nil, errors.Wrapf(err, "converting manifest list to list with MIME type %q", thisListType)
+ return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err)
}
}
@@ -532,17 +543,17 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
// by serializing them both so that we can compare them.
attemptedManifestList, err := attemptedList.Serialize()
if err != nil {
- return nil, errors.Wrapf(err, "encoding updated manifest list (%q: %#v)", updatedList.MIMEType(), updatedList.Instances())
+ return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err)
}
originalManifestList, err := originalList.Serialize()
if err != nil {
- return nil, errors.Wrapf(err, "encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances())
+ return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err)
}
// If we can't just use the original value, but we have to change it, flag an error.
if !bytes.Equal(attemptedManifestList, originalManifestList) {
if cannotModifyManifestListReason != "" {
- return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason)
+ return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason)
}
logrus.Debugf("Manifest list has been updated")
} else {
@@ -573,10 +584,17 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
}
sigs = append(sigs, newSig)
}
+ if options.SignBySigstorePrivateKeyFile != "" {
+ newSig, err := c.createSigstoreSignature(manifestList, options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase, options.SignIdentity)
+ if err != nil {
+ return nil, err
+ }
+ sigs = append(sigs, newSig)
+ }
c.Printf("Storing list signatures\n")
- if err := c.dest.PutSignatures(ctx, sigs, nil); err != nil {
- return nil, errors.Wrap(err, "writing signatures")
+ if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {
+ return nil, fmt.Errorf("writing signatures: %w", err)
}
return manifestList, nil
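The serialize-and-compare step above has a public analogue in the manifest package. A minimal sketch (not the copier's internal path; the empty index blob is only there to drive the API) of parsing a list, converting its MIME type, and checking whether the bytes actually changed:

package main

import (
	"bytes"
	"fmt"

	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// A minimal OCI image index with no entries.
	blob := []byte(`{"schemaVersion":2,"mediaType":"application/vnd.oci.image.index.v1+json","manifests":[]}`)

	list, err := manifest.ListFromBlob(blob, imgspecv1.MediaTypeImageIndex)
	if err != nil {
		panic(err)
	}
	converted, err := list.ConvertToMIMEType(manifest.DockerV2ListMediaType)
	if err != nil {
		panic(err)
	}
	out, err := converted.Serialize()
	if err != nil {
		panic(err)
	}
	// Mirrors the bytes.Equal check above: only treat the list as modified if
	// the serialized form differs from the original.
	fmt.Println("changed:", !bytes.Equal(out, blob))
}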
@@ -590,7 +608,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
multiImage, err := isMultiImage(ctx, unparsedImage)
if err != nil {
// FIXME FIXME: How to name a reference for the sub-image?
- return nil, "", "", errors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
+ return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
}
if multiImage {
return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
@@ -600,11 +618,11 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// (The multiImage check above only matches the MIME type, which we have received anyway.
// Actual parsing of anything should be deferred.)
if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
- return nil, "", "", errors.Wrap(err, "Source image rejected")
+ return nil, "", "", fmt.Errorf("Source image rejected: %w", err)
}
src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
if err != nil {
- return nil, "", "", errors.Wrapf(err, "initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
+ return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
}
// If the destination is a digested reference, make a note of that, determine what digest value we're
@@ -616,16 +634,16 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
destIsDigestedReference = true
matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
if err != nil {
- return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest")
+ return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
manifestList, _, err := unparsedToplevel.Manifest(ctx)
if err != nil {
- return nil, "", "", errors.Wrapf(err, "reading manifest from source image")
+ return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err)
}
matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
if err != nil {
- return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest")
+ return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
@@ -638,21 +656,21 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
return nil, "", "", err
}
- var sigs [][]byte
+ var sigs []internalsig.Signature
if options.RemoveSignatures {
- sigs = [][]byte{}
+ sigs = []internalsig.Signature{}
} else {
c.Printf("Getting image source signatures\n")
- s, err := src.Signatures(ctx)
+ s, err := src.UntrustedSignatures(ctx)
if err != nil {
- return nil, "", "", errors.Wrap(err, "reading signatures")
+ return nil, "", "", fmt.Errorf("reading signatures: %w", err)
}
sigs = s
}
if len(sigs) != 0 {
c.Printf("Checking if image destination supports signatures\n")
if err := c.dest.SupportsSignatures(ctx); err != nil {
- return nil, "", "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
+ return nil, "", "", fmt.Errorf("Can not copy signatures to %s: %w", transports.ImageName(c.dest.Reference()), err)
}
}
@@ -686,7 +704,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
// that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
// and we would reuse and sign it.
- ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == ""
+ ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == "" && options.SignBySigstorePrivateKeyFile == ""
if err := ic.updateEmbeddedDockerReference(); err != nil {
return nil, "", "", err
@@ -717,7 +735,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
if options.OptimizeDestinationImageAlreadyExists {
- shouldUpdateSigs := len(sigs) > 0 || options.SignBy != "" // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
+ shouldUpdateSigs := len(sigs) > 0 || options.SignBy != "" || options.SignBySigstorePrivateKeyFile != "" // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
noPendingManifestUpdates := ic.noPendingManifestUpdates()
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates)
@@ -752,8 +770,10 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// because we failed to create a manifest of the specified type because the specific manifest type
// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
// have other options available that could still succeed.
- _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError)
- _, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError)
+ var manifestTypeRejectedError types.ManifestTypeRejectedError
+ var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError
+ isManifestRejected := errors.As(err, &manifestTypeRejectedError)
+ isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError)
if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 {
// We don’t have other options.
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
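The hunk above is the crux of the pkg/errors removal: once causes are wrapped with %w, a plain type assertion on errors.Cause no longer applies, and stdlib errors.As must walk the wrap chain instead. A self-contained sketch of the difference, using a stand-in error type:

package main

import (
	"errors"
	"fmt"
)

// manifestRejectedError stands in for types.ManifestTypeRejectedError.
type manifestRejectedError struct{ mimeType string }

func (e manifestRejectedError) Error() string {
	return "manifest type rejected: " + e.mimeType
}

func main() {
	err := fmt.Errorf("writing manifest: %w", manifestRejectedError{"application/vnd.oci.image.manifest.v1+json"})

	_, ok := err.(manifestRejectedError)
	fmt.Println("plain type assertion:", ok) // false: err is the wrapper, not the cause

	var rejected manifestRejectedError
	fmt.Println("errors.As:", errors.As(err, &rejected)) // true: unwraps the %w chain
}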
@@ -765,7 +785,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
// so let’s bail out early and with a better error message.
if ic.cannotModifyManifestReason != "" {
- return nil, "", "", errors.Wrapf(err, "Writing manifest failed and we cannot try conversions: %q", cannotModifyManifestReason)
+ return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
}
// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
@@ -802,10 +822,17 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
}
sigs = append(sigs, newSig)
}
+ if options.SignBySigstorePrivateKeyFile != "" {
+ newSig, err := c.createSigstoreSignature(manifestBytes, options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase, options.SignIdentity)
+ if err != nil {
+ return nil, "", "", err
+ }
+ sigs = append(sigs, newSig)
+ }
c.Printf("Storing signatures\n")
- if err := c.dest.PutSignatures(ctx, sigs, targetInstance); err != nil {
- return nil, "", "", errors.Wrap(err, "writing signatures")
+ if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
+ return nil, "", "", fmt.Errorf("writing signatures: %w", err)
}
return manifestBytes, retManifestType, retManifestDigest, nil
@@ -825,11 +852,11 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
if dest.MustMatchRuntimeOS() {
c, err := src.OCIConfig(ctx)
if err != nil {
- return errors.Wrapf(err, "parsing image configuration")
+ return fmt.Errorf("parsing image configuration: %w", err)
}
wantedPlatforms, err := platform.WantedPlatforms(sys)
if err != nil {
- return errors.Wrapf(err, "getting current platform information %#v", sys)
+ return fmt.Errorf("getting current platform information %#v: %w", sys, err)
}
options := newOrderedSet()
@@ -866,7 +893,7 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error {
}
if ic.cannotModifyManifestReason != "" {
- return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q",
+ return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q",
transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason)
}
ic.manifestUpdates.EmbeddedDockerReference = destRef
@@ -897,7 +924,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// If we only need to check authorization, no updates required.
if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
if ic.cannotModifyManifestReason != "" {
- return errors.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
+ return fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
}
srcInfos = updatedSrcInfos
srcInfosUpdated = true
@@ -1024,7 +1051,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
var pendingImage types.Image = ic.src
if !ic.noPendingManifestUpdates() {
if ic.cannotModifyManifestReason != "" {
- return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason)
+ return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason)
}
if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
// We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
@@ -1033,17 +1060,17 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
// when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
// Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
- return nil, "", errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
+ return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
}
pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
if err != nil {
- return nil, "", errors.Wrap(err, "creating an updated image manifest")
+ return nil, "", fmt.Errorf("creating an updated image manifest: %w", err)
}
pendingImage = pi
}
man, _, err := pendingImage.Manifest(ctx)
if err != nil {
- return nil, "", errors.Wrap(err, "reading manifest")
+ return nil, "", fmt.Errorf("reading manifest: %w", err)
}
if err := ic.copyConfig(ctx, pendingImage); err != nil {
@@ -1060,7 +1087,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
}
if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
logrus.Debugf("Error %v while writing manifest %q", err, string(man))
- return nil, "", errors.Wrapf(err, "writing manifest")
+ return nil, "", fmt.Errorf("writing manifest: %w", err)
}
return man, manifestDigest, nil
}
@@ -1083,7 +1110,7 @@ func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
configBlob, err := src.ConfigBlob(ctx)
if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
+ return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err)
}
destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false)
@@ -1098,7 +1125,7 @@ func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
return err
}
if destInfo.Digest != srcInfo.Digest {
- return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+ return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
}
}
return nil
@@ -1163,7 +1190,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
SrcRef: srcRef,
})
if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest)
+ return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
}
if reused {
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
@@ -1237,7 +1264,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
+ return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
}
defer srcStream.Close()
@@ -1253,7 +1280,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
return types.BlobInfo{}, "", ctx.Err()
case diffIDResult := <-diffIDChan:
if diffIDResult.err != nil {
- return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID")
+ return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err)
}
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
// Don’t record any associations that involve encrypted data. This is a bit crude,
diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go
index ccc9110ff..901d10826 100644
--- a/vendor/github.com/containers/image/v5/copy/digesting_reader.go
+++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go
@@ -1,11 +1,11 @@
package copy
import (
+ "fmt"
"hash"
"io"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
type digestingReader struct {
@@ -23,11 +23,11 @@ type digestingReader struct {
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
var digester digest.Digester
if err := expectedDigest.Validate(); err != nil {
- return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
+ return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest)
}
digestAlgorithm := expectedDigest.Algorithm()
if !digestAlgorithm.Available() {
- return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+ return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
}
digester = digestAlgorithm.Digester()
@@ -47,14 +47,14 @@ func (d *digestingReader) Read(p []byte) (int, error) {
// Coverage: This should not happen, the hash.Hash interface requires
// d.digest.Write to never return an error, and the io.Writer interface
// requires n2 == len(input) if no error is returned.
- return 0, errors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n)
+ return 0, fmt.Errorf("updating digest during verification: %d vs. %d: %w", n2, n, err)
}
}
if err == io.EOF {
actualDigest := d.digester.Digest()
if actualDigest != d.expectedDigest {
d.validationFailed = true
- return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+ return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
}
d.validationSucceeded = true
}
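digestingReader verifies the stream against an expected digest once it hits EOF. The go-digest module it already imports exposes the same check as a Verifier; a minimal sketch (copy keeps its own reader because it also needs to record validation success and failure inline):

package main

import (
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	payload := "layer bytes"
	expected := digest.FromString(payload) // sha256 by default

	verifier := expected.Verifier()
	if _, err := io.Copy(verifier, strings.NewReader(payload)); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified()) // true only if the bytes matched
}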
diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go
index ae0576da4..5eae8bfcd 100644
--- a/vendor/github.com/containers/image/v5/copy/encryption.go
+++ b/vendor/github.com/containers/image/v5/copy/encryption.go
@@ -1,12 +1,12 @@
package copy
import (
+ "fmt"
"strings"
"github.com/containers/image/v5/types"
"github.com/containers/ocicrypt"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// isOciEncrypted returns a bool indicating if a mediatype is encrypted
@@ -41,7 +41,7 @@ func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.
}
reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false)
if err != nil {
- return nil, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest)
+ return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
}
stream.reader = reader
@@ -92,7 +92,7 @@ func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool
}
reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc)
if err != nil {
- return nil, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest)
+ return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
}
stream.reader = reader
@@ -116,7 +116,7 @@ func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *ty
encryptAnnotations, err := d.finalizer()
if err != nil {
- return errors.Wrap(err, "Unable to finalize encryption")
+ return fmt.Errorf("Unable to finalize encryption: %w", err)
}
*operation = types.Encrypt
if *annotations == nil {
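ocicrypt.EncryptLayer returns its annotations through a finalizer that is only meaningful after the stream has been drained, which is why updateCryptoOperationAndAnnotations calls d.finalizer() late. A toy sketch of that finalizer pattern (no real crypto; all names here are made up):

package main

import (
	"fmt"
	"io"
	"strings"
)

type finalizer func() (map[string]string, error)

type countingReader struct {
	r io.Reader
	n *int64
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	*c.n += int64(n)
	return n, err
}

// encryptStep wraps the stream and defers metadata computation to a finalizer
// that is valid only once the wrapped reader has been fully consumed.
func encryptStep(r io.Reader) (io.Reader, finalizer) {
	var n int64
	counted := &countingReader{r: r, n: &n}
	return counted, func() (map[string]string, error) {
		return map[string]string{"org.example.bytes": fmt.Sprint(n)}, nil
	}
}

func main() {
	r, fin := encryptStep(strings.NewReader("layer bytes"))
	io.Copy(io.Discard, r) // drain the stream first
	ann, _ := fin()        // only now are the annotations complete
	fmt.Println(ann)
}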
diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go
index b65459f8c..df12e50c0 100644
--- a/vendor/github.com/containers/image/v5/copy/manifest.go
+++ b/vendor/github.com/containers/image/v5/copy/manifest.go
@@ -2,11 +2,12 @@ package copy
import (
"context"
+ "errors"
+ "fmt"
"strings"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -180,7 +181,7 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
if len(prioritizedTypes.list) == 0 {
- return "", nil, errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
+ return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
}
selectedType := prioritizedTypes.list[0]
otherSupportedTypes := prioritizedTypes.list[1:]
diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go
index aa42674bc..0ea06f4bb 100644
--- a/vendor/github.com/containers/image/v5/copy/sign.go
+++ b/vendor/github.com/containers/image/v5/copy/sign.go
@@ -1,38 +1,62 @@
package copy
import (
+ "fmt"
+
"github.com/containers/image/v5/docker/reference"
+ internalsig "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/signature/sigstore"
"github.com/containers/image/v5/transports"
- "github.com/pkg/errors"
)
// createSignature creates a new signature of manifest using keyIdentity.
-func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) ([]byte, error) {
+func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) (internalsig.Signature, error) {
mech, err := signature.NewGPGSigningMechanism()
if err != nil {
- return nil, errors.Wrap(err, "initializing GPG")
+ return nil, fmt.Errorf("initializing GPG: %w", err)
}
defer mech.Close()
if err := mech.SupportsSigning(); err != nil {
- return nil, errors.Wrap(err, "Signing not supported")
+ return nil, fmt.Errorf("Signing not supported: %w", err)
}
if identity != nil {
if reference.IsNameOnly(identity) {
- return nil, errors.Errorf("Sign identity must be a fully specified reference %s", identity)
+ return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity)
}
} else {
identity = c.dest.Reference().DockerReference()
if identity == nil {
- return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
+ return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
}
}
- c.Printf("Signing manifest\n")
+ c.Printf("Signing manifest using simple signing\n")
newSig, err := signature.SignDockerManifestWithOptions(manifest, identity.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase})
if err != nil {
- return nil, errors.Wrap(err, "creating signature")
+ return nil, fmt.Errorf("creating signature: %w", err)
+ }
+ return internalsig.SimpleSigningFromBlob(newSig), nil
+}
+
+// createSigstoreSignature creates a new sigstore signature of manifest using privateKeyFile and identity.
+func (c *copier) createSigstoreSignature(manifest []byte, privateKeyFile string, passphrase []byte, identity reference.Named) (internalsig.Signature, error) {
+ if identity != nil {
+ if reference.IsNameOnly(identity) {
+ return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String())
+ }
+ } else {
+ identity = c.dest.Reference().DockerReference()
+ if identity == nil {
+ return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
+ }
+ }
+
+ c.Printf("Signing manifest using a sigstore signature\n")
+ newSig, err := sigstore.SignDockerManifestWithPrivateKeyFileUnstable(manifest, identity, privateKeyFile, passphrase)
+ if err != nil {
+ return nil, fmt.Errorf("creating signature: %w", err)
}
return newSig, nil
}
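For callers, the new path is driven by the SignBySigstorePrivateKeyFile / SignSigstorePrivateKeyPassphrase options referenced in the call sites above. A hedged sketch of a copy that signs with a sigstore private key (registry names and the key path are placeholders, and the permissive policy is for illustration only):

package main

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/signature"
)

func main() {
	srcRef, err := docker.ParseReference("//registry.example.com/app:1.0")
	if err != nil {
		panic(err)
	}
	destRef, err := docker.ParseReference("//registry.example.com/app:signed")
	if err != nil {
		panic(err)
	}
	policyCtx, err := signature.NewPolicyContext(&signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
	})
	if err != nil {
		panic(err)
	}
	defer policyCtx.Destroy()

	// These two options feed createSigstoreSignature above.
	_, err = copy.Image(context.Background(), policyCtx, destRef, srcRef, &copy.Options{
		SignBySigstorePrivateKeyFile:     "cosign.key", // assumption: a cosign-generated key on disk
		SignSigstorePrivateKeyPassphrase: []byte("passphrase"),
	})
	if err != nil {
		panic(err)
	}
}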
diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go
index 3b135e68e..47a5c17cd 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go
@@ -2,15 +2,20 @@ package directory
import (
"context"
+ "errors"
+ "fmt"
"io"
"os"
"path/filepath"
"runtime"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -21,47 +26,50 @@ const version = "Directory Transport Version: 1.1\n"
var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data")
type dirImageDestination struct {
- ref dirReference
- desiredLayerCompression types.LayerCompression
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.AlwaysSupportsSignatures
+
+ ref dirReference
}
// newImageDestination returns an ImageDestination for writing to a directory.
-func newImageDestination(sys *types.SystemContext, ref dirReference) (types.ImageDestination, error) {
+func newImageDestination(sys *types.SystemContext, ref dirReference) (private.ImageDestination, error) {
desiredLayerCompression := types.PreserveOriginal
if sys != nil {
if sys.DirForceCompress {
desiredLayerCompression = types.Compress
if sys.DirForceDecompress {
- return nil, errors.Errorf("Cannot compress and decompress at the same time")
+ return nil, fmt.Errorf("Cannot compress and decompress at the same time")
}
}
if sys.DirForceDecompress {
desiredLayerCompression = types.Decompress
}
}
- d := &dirImageDestination{ref: ref, desiredLayerCompression: desiredLayerCompression}
// If directory exists check if it is empty
// if not empty, check whether the contents match that of a container image directory and overwrite the contents
// if the contents don't match throw an error
- dirExists, err := pathExists(d.ref.resolvedPath)
+ dirExists, err := pathExists(ref.resolvedPath)
if err != nil {
- return nil, errors.Wrapf(err, "checking for path %q", d.ref.resolvedPath)
+ return nil, fmt.Errorf("checking for path %q: %w", ref.resolvedPath, err)
}
if dirExists {
- isEmpty, err := isDirEmpty(d.ref.resolvedPath)
+ isEmpty, err := isDirEmpty(ref.resolvedPath)
if err != nil {
return nil, err
}
if !isEmpty {
- versionExists, err := pathExists(d.ref.versionPath())
+ versionExists, err := pathExists(ref.versionPath())
if err != nil {
- return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath())
+ return nil, fmt.Errorf("checking if path exists %q: %w", ref.versionPath(), err)
}
if versionExists {
- contents, err := os.ReadFile(d.ref.versionPath())
+ contents, err := os.ReadFile(ref.versionPath())
if err != nil {
return nil, err
}
@@ -73,22 +81,37 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag
return nil, ErrNotContainerImageDir
}
// delete directory contents so that only one image is in the directory at a time
- if err = removeDirContents(d.ref.resolvedPath); err != nil {
- return nil, errors.Wrapf(err, "erasing contents in %q", d.ref.resolvedPath)
+ if err = removeDirContents(ref.resolvedPath); err != nil {
+ return nil, fmt.Errorf("erasing contents in %q: %w", ref.resolvedPath, err)
}
- logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
+ logrus.Debugf("overwriting existing container image directory %q", ref.resolvedPath)
}
} else {
// create directory if it doesn't exist
- if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil {
- return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath)
+ if err := os.MkdirAll(ref.resolvedPath, 0755); err != nil {
+ return nil, fmt.Errorf("unable to create directory %q: %w", ref.resolvedPath, err)
}
}
// create version file
- err = os.WriteFile(d.ref.versionPath(), []byte(version), 0644)
+ err = os.WriteFile(ref.versionPath(), []byte(version), 0644)
if err != nil {
- return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath())
+ return nil, fmt.Errorf("creating version file %q: %w", ref.versionPath(), err)
+ }
+
+ d := &dirImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: nil,
+ DesiredLayerCompression: desiredLayerCompression,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+ HasThreadSafePutBlob: false,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
+ ref: ref,
}
+ d.Compat = impl.AddCompat(d)
return d, nil
}
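The rewritten destination replaces hand-written getters with embedded helper types from the internal impl/stubs packages. Since those packages are internal, here is a toy sketch of the same embedding pattern with made-up types:

package main

import "fmt"

type Properties struct{ ThreadSafe bool }

// PropertyMethodsInitialize supplies getter methods to any type that embeds it.
type PropertyMethodsInitialize struct{ p Properties }

func PropertyMethods(p Properties) PropertyMethodsInitialize {
	return PropertyMethodsInitialize{p: p}
}

func (o PropertyMethodsInitialize) HasThreadSafePutBlob() bool { return o.p.ThreadSafe }

type dest struct {
	PropertyMethodsInitialize // embedded: dest gets HasThreadSafePutBlob for free
	path string
}

func main() {
	d := dest{
		PropertyMethodsInitialize: PropertyMethods(Properties{ThreadSafe: false}),
		path:                      "/tmp/img",
	}
	fmt.Println(d.HasThreadSafePutBlob(), d.path)
}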
@@ -103,51 +126,14 @@ func (d *dirImageDestination) Close() error {
return nil
}
-func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
- return nil
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
- return d.desiredLayerCompression
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *dirImageDestination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *dirImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, DockerReference() returns nil.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *dirImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
+// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob")
if err != nil {
return types.BlobInfo{}, err
@@ -171,7 +157,7 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+ return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
@@ -198,18 +184,16 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: blobDigest, Size: size}, nil
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with unknown digest")
}
blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath)
@@ -234,12 +218,17 @@ func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte,
return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
}
-// PutSignatures writes a set of signatures to the destination.
+// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
for i, sig := range signatures {
- if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
+ blob, err := signature.Blob(sig)
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), blob, 0644); err != nil {
return err
}
}
diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go
index 8b509112a..98efdedd7 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_src.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_src.go
@@ -2,22 +2,41 @@ package directory
import (
"context"
+ "fmt"
"io"
"os"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
type dirImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
ref dirReference
}
// newImageSource returns an ImageSource reading from an existing directory.
// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ref dirReference) types.ImageSource {
- return &dirImageSource{ref}
+func newImageSource(ref dirReference) private.ImageSource {
+ s := &dirImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s
}
// Reference returns the reference used to set up this source, _as specified by the user_
@@ -43,11 +62,6 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, manifest.GuessMIMEType(m), err
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *dirImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -63,33 +77,26 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
return r, fi.Size(), nil
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
-func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- signatures := [][]byte{}
+func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ signatures := []signature.Signature{}
for i := 0; ; i++ {
- signature, err := os.ReadFile(s.ref.signaturePath(i, instanceDigest))
+ path := s.ref.signaturePath(i, instanceDigest)
+ sigBlob, err := os.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
break
}
return nil, err
}
+ signature, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing signature %q: %w", path, err)
+ }
signatures = append(signatures, signature)
}
return signatures, nil
}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *dirImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
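The GetSignaturesWithFormat loop above probes numbered signature files until the first os.IsNotExist. A standalone sketch of the same enumeration (the signature-%d naming and 1-based index are illustrative; the real layout comes from dirReference.signaturePath):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func readAllSignatures(dir string) ([][]byte, error) {
	var sigs [][]byte
	for i := 0; ; i++ {
		path := filepath.Join(dir, fmt.Sprintf("signature-%d", i+1))
		blob, err := os.ReadFile(path)
		if err != nil {
			if os.IsNotExist(err) {
				break // the first gap ends the sequence
			}
			return nil, err
		}
		sigs = append(sigs, blob)
	}
	return sigs, nil
}

func main() {
	sigs, err := readAllSignatures("/var/lib/images/app")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(sigs), "signatures")
}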
diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go
index 562404470..253ecb247 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_transport.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go
@@ -2,6 +2,7 @@ package directory
import (
"context"
+ "errors"
"fmt"
"path/filepath"
"strings"
@@ -12,7 +13,6 @@ import (
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
func init() {
@@ -39,7 +39,7 @@ func (t dirTransport) ParseReference(reference string) (types.ImageReference, er
// scope passed to this function will not be "", that value is always allowed.
func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
if !strings.HasPrefix(scope, "/") {
- return errors.Errorf("Invalid scope %s: Must be an absolute path", scope)
+ return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
}
// Refuse also "/", otherwise "/" and "" would have the same semantics,
// and "" could be unexpectedly shadowed by the "/" entry.
@@ -48,7 +48,7 @@ func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
}
cleaned := filepath.Clean(scope)
if cleaned != scope {
- return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
+ return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
}
return nil
}
@@ -157,7 +157,7 @@ func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.Syst
// DeleteImage deletes the named image from the registry, if supported.
func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for dir: images")
+ return errors.New("Deleting images not implemented for dir: images")
}
// manifestPath returns a path for the manifest within a directory using our conventions.
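ValidatePolicyConfigurationScope rejects relative and non-canonical scopes by comparing against filepath.Clean. A quick demonstration of which inputs survive those checks:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	for _, scope := range []string{"/var/lib/images", "/var/lib/images/", "/var/../etc", "relative/path"} {
		cleaned := filepath.Clean(scope)
		// A valid scope must be absolute and already in canonical form.
		fmt.Printf("%-20q abs=%-5v canonical=%v\n", scope, filepath.IsAbs(scope), cleaned == scope)
	}
}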
diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
index 71136b880..b4ff4d08a 100644
--- a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
+++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
@@ -1,10 +1,9 @@
package explicitfilepath
import (
+ "fmt"
"os"
"path/filepath"
-
- "github.com/pkg/errors"
)
// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
@@ -26,14 +25,14 @@ func ResolvePathToFullyExplicit(path string) (string, error) {
// This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
// We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
// in the resulting path, and especially not at the end.
- return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
+ return "", fmt.Errorf("Unexpectedly missing special filename component in %s", path)
}
resolvedPath := filepath.Join(resolvedParent, file)
// As a sanity check, ensure that there are no "." or ".." components.
cleanedResolvedPath := filepath.Clean(resolvedPath)
if cleanedResolvedPath != resolvedPath {
// Coverage: This should never happen.
- return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
+ return "", fmt.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
}
return resolvedPath, nil
default: // err != nil, unrecognized
diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go
index d4248db21..60521662e 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go
@@ -2,11 +2,12 @@ package archive
import (
"context"
+ "fmt"
"io"
"github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
type archiveImageDestination struct {
@@ -16,9 +17,9 @@ type archiveImageDestination struct {
writer io.Closer // May be nil if the archive is shared
}
-func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
+func newImageDestination(sys *types.SystemContext, ref archiveReference) (private.ImageDestination, error) {
if ref.sourceIndex != -1 {
- return nil, errors.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
+ return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
}
var archive *tarfile.Writer
@@ -35,7 +36,7 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.
archive = tarfile.NewWriter(fh)
writer = fh
}
- tarDest := tarfile.NewDestination(sys, archive, ref.ref)
+ tarDest := tarfile.NewDestination(sys, archive, ref.Transport().Name(), ref.ref)
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
}
@@ -47,11 +48,6 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.
}, nil
}
-// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
-func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.Decompress
-}
-
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *archiveImageDestination) Reference() types.ImageReference {
diff --git a/vendor/github.com/containers/image/v5/docker/archive/reader.go b/vendor/github.com/containers/image/v5/docker/archive/reader.go
index 4bb519a26..875a15257 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/reader.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/reader.go
@@ -1,11 +1,12 @@
package archive
import (
+ "fmt"
+
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// Reader manages a single Docker archive, allows listing its contents and accessing
@@ -40,10 +41,10 @@ func (r *Reader) Close() error {
func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) {
standalone, ok := ref.(archiveReference)
if !ok {
- return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+ return nil, nil, fmt.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
}
if standalone.archiveReader != nil {
- return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
+ return nil, nil, fmt.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
}
reader, err := NewReader(sys, standalone.path)
if err != nil {
@@ -73,22 +74,22 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
for _, tag := range image.RepoTags {
parsedTag, err := reference.ParseNormalizedNamed(tag)
if err != nil {
- return nil, errors.Wrapf(err, "Invalid tag %#v in manifest item @%d", tag, imageIndex)
+ return nil, fmt.Errorf("Invalid tag %#v in manifest item @%d: %w", tag, imageIndex, err)
}
nt, ok := parsedTag.(reference.NamedTagged)
if !ok {
- return nil, errors.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
+ return nil, fmt.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
}
ref, err := newReference(r.path, nt, -1, r.archive, nil)
if err != nil {
- return nil, errors.Wrapf(err, "creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
+ return nil, fmt.Errorf("creating a reference for tag %#v in manifest item @%d: %w", tag, imageIndex, err)
}
refs = append(refs, ref)
}
if len(refs) == 0 {
ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
if err != nil {
- return nil, errors.Wrapf(err, "creating a reference for manifest item @%d", imageIndex)
+ return nil, fmt.Errorf("creating a reference for manifest item @%d: %w", imageIndex, err)
}
refs = append(refs, ref)
}
@@ -107,7 +108,7 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) {
archiveRef, ok := ref.(archiveReference)
if !ok {
- return nil, errors.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+ return nil, fmt.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
}
manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex)
if err != nil {
diff --git a/vendor/github.com/containers/image/v5/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go
index 7acca210e..5604a5121 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/src.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/src.go
@@ -4,6 +4,7 @@ import (
"context"
"github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
)
@@ -14,7 +15,7 @@ type archiveImageSource struct {
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) {
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
var archive *tarfile.Reader
var closeArchive bool
if ref.archiveReader != nil {
@@ -28,7 +29,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveRe
archive = a
closeArchive = true
}
- src := tarfile.NewSource(archive, closeArchive, ref.ref, ref.sourceIndex)
+ src := tarfile.NewSource(archive, closeArchive, ref.Transport().Name(), ref.ref, ref.sourceIndex)
return &archiveImageSource{
Source: src,
ref: ref,
diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go
index f00b77930..9044b340b 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/transport.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go
@@ -2,6 +2,7 @@ package archive
import (
"context"
+ "errors"
"fmt"
"strconv"
"strings"
@@ -11,7 +12,6 @@ import (
ctrImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
func init() {
@@ -59,7 +59,7 @@ type archiveReference struct {
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
if refString == "" {
- return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
+ return nil, fmt.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
}
parts := strings.SplitN(refString, ":", 2)
@@ -72,21 +72,21 @@ func ParseReference(refString string) (types.ImageReference, error) {
if len(parts[1]) > 0 && parts[1][0] == '@' {
i, err := strconv.Atoi(parts[1][1:])
if err != nil {
- return nil, errors.Wrapf(err, "Invalid source index %s", parts[1])
+ return nil, fmt.Errorf("Invalid source index %s: %w", parts[1], err)
}
if i < 0 {
- return nil, errors.Errorf("Invalid source index @%d: must not be negative", i)
+ return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i)
}
sourceIndex = i
} else {
ref, err := reference.ParseNormalizedNamed(parts[1])
if err != nil {
- return nil, errors.Wrapf(err, "docker-archive parsing reference")
+ return nil, fmt.Errorf("docker-archive parsing reference: %w", err)
}
ref = reference.TagNameOnly(ref)
refTagged, isTagged := ref.(reference.NamedTagged)
if !isTagged { // If ref contains a digest, TagNameOnly does not change it
- return nil, errors.Errorf("reference does not include a tag: %s", ref.String())
+ return nil, fmt.Errorf("reference does not include a tag: %s", ref.String())
}
nt = refTagged
}
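The parsing above accepts <path>[:<tag>|:@<index>]. A toy re-implementation of just that syntax (not the library's parser), to make the accepted shapes concrete:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parse(ref string) (path, tag string, index int, err error) {
	index = -1
	parts := strings.SplitN(ref, ":", 2)
	path = parts[0]
	if len(parts) == 1 {
		return path, "", -1, nil // bare path: no tag, no index
	}
	if strings.HasPrefix(parts[1], "@") {
		index, err = strconv.Atoi(parts[1][1:])
		if err != nil || index < 0 {
			return "", "", -1, fmt.Errorf("invalid source index %s", parts[1])
		}
		return path, "", index, nil
	}
	return path, parts[1], -1, nil
}

func main() {
	fmt.Println(parse("/tmp/app.tar:latest"))
	fmt.Println(parse("/tmp/app.tar:@2"))
}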
@@ -110,16 +110,16 @@ func NewIndexReference(path string, sourceIndex int) (types.ImageReference, erro
func newReference(path string, ref reference.NamedTagged, sourceIndex int,
archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) {
if strings.Contains(path, ":") {
- return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
+ return nil, fmt.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
}
if ref != nil && sourceIndex != -1 {
- return nil, errors.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
+ return nil, fmt.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
}
if _, isDigest := ref.(reference.Canonical); isDigest {
- return nil, errors.Errorf("docker-archive doesn't support digest references: %s", ref.String())
+ return nil, fmt.Errorf("docker-archive doesn't support digest references: %s", ref.String())
}
if sourceIndex != -1 && sourceIndex < 0 {
- return nil, errors.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
+ return nil, fmt.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
}
return archiveReference{
path: path,
diff --git a/vendor/github.com/containers/image/v5/docker/archive/writer.go b/vendor/github.com/containers/image/v5/docker/archive/writer.go
index 6a4b8c645..2d8fafe29 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/writer.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/writer.go
@@ -1,13 +1,14 @@
package archive
import (
+ "errors"
+ "fmt"
"io"
"os"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// Writer manages a single in-progress Docker archive and allows adding images to it.
@@ -60,7 +61,7 @@ func openArchiveForWriting(path string) (*os.File, error) {
// only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
- return nil, errors.Wrapf(err, "opening file %q", path)
+ return nil, fmt.Errorf("opening file %q: %w", path, err)
}
succeeded := false
defer func() {
@@ -70,7 +71,7 @@ func openArchiveForWriting(path string) (*os.File, error) {
}()
fhStat, err := fh.Stat()
if err != nil {
- return nil, errors.Wrapf(err, "statting file %q", path)
+ return nil, fmt.Errorf("statting file %q: %w", path, err)
}
if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
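openArchiveForWriting uses a succeeded flag so the deferred Close fires only on error paths, while the caller owns the handle on success. A self-contained sketch of that idiom:

package main

import (
	"fmt"
	"os"
)

func openForWriting(path string) (*os.File, error) {
	fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return nil, fmt.Errorf("opening file %q: %w", path, err)
	}
	succeeded := false
	defer func() {
		if !succeeded {
			fh.Close() // clean up on every early return below
		}
	}()
	st, err := fh.Stat()
	if err != nil {
		return nil, fmt.Errorf("statting file %q: %w", path, err)
	}
	if st.Mode().IsRegular() && st.Size() != 0 {
		return nil, fmt.Errorf("%q already exists and is not empty", path)
	}
	succeeded = true // the caller now owns fh
	return fh, nil
}

func main() {
	f, err := openForWriting("/tmp/archive.tar")
	if err != nil {
		panic(err)
	}
	f.Close()
}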
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
index f68981472..dc4aa70d3 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
@@ -2,13 +2,15 @@ package daemon
import (
"context"
+ "errors"
+ "fmt"
"io"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
"github.com/docker/docker/client"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -26,13 +28,13 @@ type daemonImageDestination struct {
}
// newImageDestination returns a types.ImageDestination for the specified image reference.
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageDestination, error) {
if ref.ref == nil {
- return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+ return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
}
namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
if !ok {
- return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+ return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
}
var mustMatchRuntimeOS = true
@@ -42,7 +44,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
c, err := newDockerClient(sys)
if err != nil {
- return nil, errors.Wrap(err, "initializing docker engine client")
+ return nil, fmt.Errorf("initializing docker engine client: %w", err)
}
reader, writer := io.Pipe()
@@ -56,7 +58,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
return &daemonImageDestination{
ref: ref,
mustMatchRuntimeOS: mustMatchRuntimeOS,
- Destination: tarfile.NewDestination(sys, archive, namedTaggedRef),
+ Destination: tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef),
archive: archive,
goroutineCancel: goroutineCancel,
statusChannel: statusChannel,
@@ -84,7 +86,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
resp, err := c.ImageLoad(ctx, reader, true)
if err != nil {
- err = errors.Wrap(err, "saving image to docker engine")
+ err = fmt.Errorf("saving image to docker engine: %w", err)
return
}
defer resp.Body.Close()
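The destination streams the tarball into the daemon through an io.Pipe, with a goroutine driving the read side and reporting its result over statusChannel. A stand-in sketch of the same shape, with io.Copy in place of client.ImageLoad:

package main

import (
	"fmt"
	"io"
)

func main() {
	reader, writer := io.Pipe()
	status := make(chan error, 1)

	// The read side stands in for client.ImageLoad consuming the tar stream.
	go func() {
		_, err := io.Copy(io.Discard, reader)
		reader.CloseWithError(err) // on failure, unblocks and fails the writer
		status <- err
	}()

	// The write side stands in for the tarfile destination.
	fmt.Fprint(writer, "tar bytes...")
	writer.Close()
	fmt.Println("load result:", <-status)
}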
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
index a6d8a6cf5..b57936654 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
@@ -2,10 +2,11 @@ package daemon
import (
"context"
+ "fmt"
"github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
type daemonImageSource struct {
@@ -22,16 +23,16 @@ type daemonImageSource struct {
// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
// is the config, and that the following len(RootFS) files are the layers, but that feels
// way too brittle.)
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageSource, error) {
c, err := newDockerClient(sys)
if err != nil {
- return nil, errors.Wrap(err, "initializing docker engine client")
+ return nil, fmt.Errorf("initializing docker engine client: %w", err)
}
// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
// Either way ImageSave should create a tarball with exactly one image.
inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
if err != nil {
- return nil, errors.Wrap(err, "loading image from docker engine")
+ return nil, fmt.Errorf("loading image from docker engine: %w", err)
}
defer inputStream.Close()
@@ -39,7 +40,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef
if err != nil {
return nil, err
}
- src := tarfile.NewSource(archive, true, nil, -1)
+ src := tarfile.NewSource(archive, true, ref.Transport().Name(), nil, -1)
return &daemonImageSource{
ref: ref,
Source: src,
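ImageSave hands back a single one-shot tar stream, so the source drains it into a seekable temporary file before tarfile.NewSource can do random access. A sketch under an assumed helper name, not the vendored implementation:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// stageToTempFile drains a one-shot stream (like the one ImageSave returns)
// into a seekable temporary file.
func stageToTempFile(in io.Reader) (*os.File, error) {
	f, err := os.CreateTemp("", "docker-tar-*.tar")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, in); err != nil {
		f.Close()
		os.Remove(f.Name())
		return nil, fmt.Errorf("staging image tarball: %w", err)
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		f.Close()
		os.Remove(f.Name())
		return nil, err
	}
	return f, nil
}

func main() {
	f, err := stageToTempFile(strings.NewReader("tar bytes"))
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	fmt.Println("staged at", f.Name())
}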
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
index d75579784..31ce167f1 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
@@ -2,6 +2,7 @@ package daemon
import (
"context"
+ "errors"
"fmt"
"github.com/containers/image/v5/docker/policyconfiguration"
@@ -10,7 +11,6 @@ import (
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
func init() {
@@ -39,7 +39,7 @@ func (t daemonTransport) ParseReference(reference string) (types.ImageReference,
func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
// ID values cannot be effectively namespaced, and are clearly invalid host:port values.
if _, err := digest.Parse(scope); err == nil {
- return errors.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope)
+ return fmt.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope)
}
// FIXME? We could be verifying the various character set and length restrictions
@@ -70,7 +70,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
// The daemon explicitly refuses to tag images with a repo name equal to digest.Canonical - but _only_ this digest name.
// Other digest references are ambiguous, so refuse them.
if dgst.Algorithm() != digest.Canonical {
- return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
+ return nil, fmt.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
}
return NewReference(dgst, nil)
}
@@ -80,7 +80,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
return nil, err
}
if reference.FamiliarName(ref) == digest.Canonical.String() {
- return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
+ return nil, fmt.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
}
return NewReference("", ref)
}
@@ -92,7 +92,7 @@ func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference,
}
if ref != nil {
if reference.IsNameOnly(ref) {
- return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+ return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
}
// A github.com/distribution/reference value can have a tag and a digest at the same time!
// Most versions of docker/reference do not handle that (ignoring the tag), so reject such input.
@@ -102,7 +102,7 @@ func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference,
_, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical)
if isTagged && isDigested {
- return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
+ return nil, fmt.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
}
}
return daemonReference{
@@ -215,5 +215,5 @@ func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemCon
// Should this just untag the image? Should this stop running containers?
// The semantics is not quite as clear as for remote repositories.
// The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant.
- return errors.Errorf("Deleting images not implemented for docker-daemon: images")
+ return errors.New("Deleting images not implemented for docker-daemon: images")
}
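The tag-and-digest rejection above hinges on interface assertions over the parsed reference. A freestanding sketch using the docker/distribution reference package (containers/image vendors an equivalent one):

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("example.com/app:v1")
	if err != nil {
		panic(err)
	}
	_, isTagged := ref.(reference.NamedTagged)
	_, isDigested := ref.(reference.Canonical)
	fmt.Println(isTagged, isDigested) // true false: tagged, not digested
}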
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index 29c256869..5de076740 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -4,6 +4,7 @@ import (
"context"
"crypto/tls"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -17,16 +18,19 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/containers/image/v5/version"
"github.com/containers/storage/pkg/homedir"
+ "github.com/docker/distribution/registry/api/errcode"
+ v2 "github.com/docker/distribution/registry/api/v2"
clientLib "github.com/docker/distribution/registry/client"
"github.com/docker/go-connections/tlsconfig"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
@@ -101,10 +105,11 @@ type dockerClient struct {
// by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
tlsClientConfig *tls.Config
// The following members are not set by newDockerClient and must be set by callers if needed.
- auth types.DockerAuthConfig
- registryToken string
- signatureBase signatureStorageBase
- scope authScope
+ auth types.DockerAuthConfig
+ registryToken string
+ signatureBase lookasideStorageBase
+ useSigstoreAttachments bool
+ scope authScope
// The following members are detected registry properties:
// They are set after a successful detectProperties(), and never change afterwards.
@@ -209,13 +214,13 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
// signatureBase is always set in the return value
-func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
+func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) {
auth, err := config.GetCredentialsForRef(sys, ref.ref)
if err != nil {
- return nil, errors.Wrapf(err, "getting username and password")
+ return nil, fmt.Errorf("getting username and password: %w", err)
}
- sigBase, err := SignatureStorageBaseURL(sys, ref, write)
+ sigBase, err := registryConfig.lookasideStorageBaseURL(ref, write)
if err != nil {
return nil, err
}
@@ -230,6 +235,7 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
client.registryToken = sys.DockerBearerRegistryToken
}
client.signatureBase = sigBase
+ client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref)
client.scope.actions = actions
client.scope.remoteName = reference.Path(ref.ref)
return client, nil
@@ -266,7 +272,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
skipVerify := false
reg, err := sysregistriesv2.FindRegistry(sys, reference)
if err != nil {
- return nil, errors.Wrapf(err, "loading registries")
+ return nil, fmt.Errorf("loading registries: %w", err)
}
if reg != nil {
if reg.Blocked {
@@ -294,7 +300,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
client, err := newDockerClient(sys, registry, registry)
if err != nil {
- return errors.Wrapf(err, "creating new docker client")
+ return fmt.Errorf("creating new docker client: %w", err)
}
client.auth = types.DockerAuthConfig{
Username: username,
@@ -343,7 +349,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
// We can't use GetCredentialsForRef here because we want to search the whole registry.
auth, err := config.GetCredentials(sys, registry)
if err != nil {
- return nil, errors.Wrapf(err, "getting username and password")
+ return nil, fmt.Errorf("getting username and password: %w", err)
}
// The /v2/_catalog endpoint has been disabled for docker.io therefore
@@ -357,7 +363,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
client, err := newDockerClient(sys, hostname, registry)
if err != nil {
- return nil, errors.Wrapf(err, "creating new docker client")
+ return nil, fmt.Errorf("creating new docker client: %w", err)
}
client.auth = auth
if sys != nil {
@@ -400,13 +406,13 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
- return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
+ return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err := httpResponseToError(resp, "")
logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err)
- return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
+ return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
}
v2Res := &V2Results{}
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
@@ -628,7 +634,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
scopes []authScope) (*bearerToken, error) {
realm, ok := challenge.Parameters["realm"]
if !ok {
- return nil, errors.Errorf("missing realm in bearer auth challenge")
+ return nil, errors.New("missing realm in bearer auth challenge")
}
authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil)
@@ -676,7 +682,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
scopes []authScope) (*bearerToken, error) {
realm, ok := challenge.Parameters["realm"]
if !ok {
- return nil, errors.Errorf("missing realm in bearer auth challenge")
+ return nil, errors.New("missing realm in bearer auth challenge")
}
authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil)
@@ -760,7 +766,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
err = ping("http")
}
if err != nil {
- err = errors.Wrapf(err, "pinging container registry %s", c.registry)
+ err = fmt.Errorf("pinging container registry %s: %w", c.registry, err)
if c.sys != nil && c.sys.DockerDisableV1Ping {
return err
}
@@ -800,6 +806,166 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return c.detectPropertiesError
}
+func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) {
+ path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest)
+ headers := map[string][]string{
+ "Accept": manifest.DefaultRequestedManifestMIMETypes,
+ }
+ res, err := c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, "", fmt.Errorf("reading manifest %s in %s: %w", tagOrDigest, ref.ref.Name(), registryHTTPResponseToError(res))
+ }
+
+ manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
+ if err != nil {
+ return nil, "", err
+ }
+ return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
+}
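fetchManifest relies on standard HTTP content negotiation: each acceptable manifest MIME type goes into the Accept header, and the registry's Content-Type on the response decides how the bytes are parsed. In plain net/http terms (a sketch, not the vendored request plumbing; the registry host is a placeholder):

package main

import (
	"context"
	"fmt"
	"net/http"
)

func fetchManifestType(ctx context.Context) (string, error) {
	url := "https://registry.example.com/v2/library/busybox/manifests/latest"
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	for _, t := range []string{
		"application/vnd.oci.image.manifest.v1+json",
		"application/vnd.docker.distribution.manifest.v2+json",
	} {
		req.Header.Add("Accept", t)
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return "", fmt.Errorf("reading manifest: status %d", res.StatusCode)
	}
	// The registry's Content-Type decides which parser to use.
	return res.Header.Get("Content-Type"), nil
}

func main() {
	ct, err := fetchManifestType(context.Background())
	fmt.Println(ct, err)
}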
+
+// getExternalBlob returns a reader for the first available blob URL from urls, which must not be empty.
+// It can return a nil reader when none of the URLs is supported; in that case, the caller
+// should fall back to fetching the non-external blob (i.e. pull from the registry).
+func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
+ var (
+ resp *http.Response
+ err error
+ )
+ if len(urls) == 0 {
+ return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
+ }
+ for _, u := range urls {
+ url, err := url.Parse(u)
+ if err != nil || (url.Scheme != "http" && url.Scheme != "https") {
+ continue // unsupported URL; skip it.
+ }
+ // NOTE: we must not authenticate on additional URLs as those
+ // can be abused to leak credentials or tokens. Please
+ // refer to CVE-2020-15157 for more information.
+ resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
+ if err == nil {
+ if resp.StatusCode != http.StatusOK {
+ err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
+ logrus.Debug(err)
+ resp.Body.Close()
+ continue
+ }
+ break
+ }
+ }
+ if resp == nil && err == nil {
+ return nil, 0, nil // fallback to non-external blob
+ }
+ if err != nil {
+ return nil, 0, err
+ }
+ return resp.Body, getBlobSize(resp), nil
+}
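The security-relevant detail in getExternalBlob is the noAuth request: foreign-layer URLs are attacker-controllable strings in a manifest, so sending registry credentials to them would leak tokens (CVE-2020-15157). A stripped-down sketch of the same rule:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
)

// fetchExternal tries each candidate URL anonymously; registry credentials
// must never be attached to these requests.
func fetchExternal(ctx context.Context, urls []string) (io.ReadCloser, error) {
	for _, u := range urls {
		parsed, err := url.Parse(u)
		if err != nil || (parsed.Scheme != "http" && parsed.Scheme != "https") {
			continue // unsupported URL; skip it
		}
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, parsed.String(), nil)
		if err != nil {
			continue
		}
		resp, err := http.DefaultClient.Do(req) // no Authorization header
		if err != nil {
			continue
		}
		if resp.StatusCode != http.StatusOK {
			resp.Body.Close()
			continue
		}
		return resp.Body, nil
	}
	return nil, errors.New("no usable external blob URL")
}

func main() {
	_, err := fetchExternal(context.Background(), []string{"ftp://ignored.example/blob"})
	fmt.Println(err)
}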
+
+func getBlobSize(resp *http.Response) int64 {
+ size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ size = -1
+ }
+ return size
+}
+
+// getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if len(info.URLs) != 0 {
+ r, s, err := c.getExternalBlob(ctx, info.URLs)
+ if err != nil {
+ return nil, 0, err
+ } else if r != nil {
+ return r, s, nil
+ }
+ }
+
+ path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String())
+ logrus.Debugf("Downloading %s", path)
+ res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
+ if err != nil {
+ return nil, 0, err
+ }
+ if err := httpResponseToError(res, "Error fetching blob"); err != nil {
+ res.Body.Close()
+ return nil, 0, err
+ }
+ cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref))
+ return res.Body, getBlobSize(res), nil
+}
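Callers of getBlob are expected to verify what they read against info.Digest; the go-digest Verifier makes that a tee over the stream. A usage sketch:

package main

import (
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	payload := "blob bytes"
	want := digest.FromString(payload)

	verifier := want.Verifier() // an io.Writer that accumulates the hash
	body := io.TeeReader(strings.NewReader(payload), verifier)
	if _, err := io.Copy(io.Discard, body); err != nil {
		panic(err)
	}
	fmt.Println(verifier.Verified()) // true iff the stream matched the digest
}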
+
+// getOCIDescriptorContents returns the contents of the blob specified by desc in ref, which must fit within maxSize.
+func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
+ // Note that this copies all kinds of attachments: attestations, and whatever else is there,
+ // not just signatures. We leave the signature consumers to decide based on the MIME type.
+ reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache)
+ if err != nil {
+ return nil, err
+ }
+ defer reader.Close()
+ payload, err := iolimits.ReadAtMost(reader, iolimits.MaxSignatureBodySize)
+ if err != nil {
+ return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
+ }
+ return payload, nil
+}
+
+// isManifestUnknownError returns true iff err from fetchManifest is a “manifest unknown” error.
+func isManifestUnknownError(err error) bool {
+ var errs errcode.Errors
+ if !errors.As(err, &errs) || len(errs) == 0 {
+ return false
+ }
+ err = errs[0]
+ ec, ok := err.(errcode.ErrorCoder)
+ if !ok {
+ return false
+ }
+ return ec.ErrorCode() == v2.ErrorCodeManifestUnknown
+}
+
+// getSigstoreAttachmentManifest loads and parses the manifest for sigstore attachments for
+// digest in ref.
+// It returns (nil, nil) if the manifest does not exist.
+func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) {
+ tag := sigstoreAttachmentTag(digest)
+ sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("Looking for sigstore attachments in %s", sigstoreRef.String())
+ manifestBlob, mimeType, err := c.fetchManifest(ctx, ref, tag)
+ if err != nil {
+ // FIXME: Are we going to need better heuristics??
+ // This alone is probably a good enough reason for sigstore to be opt-in only,
+ // otherwise we would just break ordinary copies.
+ if isManifestUnknownError(err) {
+ logrus.Debugf("Fetching sigstore attachment manifest failed, assuming it does not exist: %v", err)
+ return nil, nil
+ }
+ logrus.Debugf("Fetching sigstore attachment manifest failed: %v", err)
+ return nil, err
+ }
+ if mimeType != imgspecv1.MediaTypeImageManifest {
+ // FIXME: Try anyway??
+ return nil, fmt.Errorf("unexpected MIME type for sigstore attachment manifest %s: %q",
+ sigstoreRef.String(), mimeType)
+ }
+ res, err := manifest.OCI1FromManifest(manifestBlob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing manifest %s: %w", sigstoreRef.String(), err)
+ }
+ return res, nil
+}
+
// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
@@ -811,7 +977,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
- return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
+ return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), clientLib.HandleErrorResponse(res))
}
body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)
@@ -821,7 +987,12 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
var parsedBody extensionSignatureList
if err := json.Unmarshal(body, &parsedBody); err != nil {
- return nil, errors.Wrapf(err, "decoding signature list")
+ return nil, fmt.Errorf("decoding signature list: %w", err)
}
return &parsedBody, nil
}
+
+// sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest.
+func sigstoreAttachmentTag(d digest.Digest) string {
+ return strings.Replace(d.String(), ":", "-", 1) + ".sig"
+}
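The attachment tag scheme is the cosign convention: the manifest digest with the ':' replaced by '-' plus a '.sig' suffix, so a manifest at sha256:abc... is looked up at tag sha256-abc....sig. Concretely:

package main

import (
	"fmt"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	d := digest.FromString("example manifest")
	tag := strings.Replace(d.String(), ":", "-", 1) + ".sig"
	fmt.Println(tag) // sha256-<hex>.sig
}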
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
index 73687e86f..3e8dbbee1 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -3,6 +3,7 @@ package docker
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"net/http"
"net/url"
@@ -13,7 +14,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
@@ -56,13 +56,17 @@ func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) {
dr, ok := ref.(dockerReference)
if !ok {
- return nil, errors.Errorf("ref must be a dockerReference")
+ return nil, errors.New("ref must be a dockerReference")
}
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
path := fmt.Sprintf(tagsPath, reference.Path(dr.ref))
- client, err := newDockerClientFromRef(sys, dr, false, "pull")
+ client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull")
if err != nil {
- return nil, errors.Wrap(err, "failed to create client")
+ return nil, fmt.Errorf("failed to create client: %w", err)
}
tags := make([]string, 0)
@@ -116,7 +120,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (digest.Digest, error) {
dr, ok := ref.(dockerReference)
if !ok {
- return "", errors.Errorf("ref must be a dockerReference")
+ return "", errors.New("ref must be a dockerReference")
}
tagOrDigest, err := dr.tagOrDigest()
@@ -124,9 +128,13 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
return "", err
}
- client, err := newDockerClientFromRef(sys, dr, false, "pull")
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return "", err
+ }
+ client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull")
if err != nil {
- return "", errors.Wrap(err, "failed to create client")
+ return "", fmt.Errorf("failed to create client: %w", err)
}
path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest)
@@ -141,7 +149,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
- return "", errors.Wrapf(registryHTTPResponseToError(res), "reading digest %s in %s", tagOrDigest, dr.ref.Name())
+ return "", fmt.Errorf("reading digest %s in %s: %w", tagOrDigest, dr.ref.Name(), registryHTTPResponseToError(res))
}
dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest"))
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index d02100cf8..6cd693b6b 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -5,6 +5,7 @@ import (
"context"
"crypto/rand"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -15,20 +16,29 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/internal/streamdigest"
"github.com/containers/image/v5/internal/uploadreader"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
"github.com/docker/distribution/registry/api/errcode"
v2 "github.com/docker/distribution/registry/api/v2"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type dockerImageDestination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+
ref dockerReference
c *dockerClient
// State
@@ -36,15 +46,40 @@ type dockerImageDestination struct {
}
// newImageDestination creates a new ImageDestination for the specified image reference.
-func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
- c, err := newDockerClientFromRef(sys, ref, true, "pull,push")
+func newImageDestination(sys *types.SystemContext, ref dockerReference) (private.ImageDestination, error) {
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
+ c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "pull,push")
if err != nil {
return nil, err
}
- return &dockerImageDestination{
+ mimeTypes := []string{
+ imgspecv1.MediaTypeImageManifest,
+ manifest.DockerV2Schema2MediaType,
+ imgspecv1.MediaTypeImageIndex,
+ manifest.DockerV2ListMediaType,
+ }
+ if c.sys == nil || !c.sys.DockerDisableDestSchema1MIMETypes {
+ mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType)
+ }
+
+ dest := &dockerImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: mimeTypes,
+ DesiredLayerCompression: types.Compress,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
+ HasThreadSafePutBlob: true,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
ref: ref,
c: c,
- }, nil
+ }
+ dest.Compat = impl.AddCompat(dest)
+ return dest, nil
}
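The rewrite replaces hand-written getter methods with embedded helper types: impl.PropertyMethods turns a struct of constant properties into the corresponding interface methods, and stubs supply defaults such as "no partial PutBlob". The mechanics are plain struct embedding; a self-contained sketch under assumed names:

package main

import "fmt"

// properties mirrors the idea of impl.Properties (names assumed).
type properties struct {
	HasThreadSafePutBlob bool
}

// propertyMethods exposes the values as methods, like impl.PropertyMethodsInitialize.
type propertyMethods struct{ vals properties }

func (p propertyMethods) HasThreadSafePutBlob() bool { return p.vals.HasThreadSafePutBlob }

// dest gets the method for free via embedding.
type dest struct {
	propertyMethods
}

func main() {
	d := dest{propertyMethods{properties{HasThreadSafePutBlob: true}}}
	fmt.Println(d.HasThreadSafePutBlob()) // true, with no per-type boilerplate
}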
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -58,19 +93,6 @@ func (d *dockerImageDestination) Close() error {
return nil
}
-func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
- mimeTypes := []string{
- imgspecv1.MediaTypeImageManifest,
- manifest.DockerV2Schema2MediaType,
- imgspecv1.MediaTypeImageIndex,
- manifest.DockerV2ListMediaType,
- }
- if d.c.sys == nil || !d.c.sys.DockerDisableDestSchema1MIMETypes {
- mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType)
- }
- return mimeTypes
-}
-
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error {
@@ -83,32 +105,16 @@ func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error {
case d.c.signatureBase != nil:
return nil
default:
- return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
}
}
-func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.Compress
-}
-
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
return true
}
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
-}
-
// sizeCounter is an io.Writer which only counts the total size of its input.
type sizeCounter struct{ size int64 }
@@ -117,19 +123,14 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
return len(p), nil
}
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
- return true
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
+// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
// If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
// This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
// the source blob is uncompressed, and the destination blob is being compressed "on the fly".
@@ -146,7 +147,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
if inputInfo.Digest != "" {
// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
- haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, cache)
+ haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache)
if err != nil {
return types.BlobInfo{}, err
}
@@ -165,11 +166,11 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
- return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "initiating layer upload to %s in %s", uploadPath, d.c.registry)
+ return types.BlobInfo{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
}
uploadLocation, err := res.Location()
if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "determining upload URL")
+ return types.BlobInfo{}, fmt.Errorf("determining upload URL: %w", err)
}
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
@@ -188,11 +189,11 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
- return nil, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer chunked")
+ return nil, fmt.Errorf("uploading layer chunked: %w", registryHTTPResponseToError(res))
}
uploadLocation, err := res.Location()
if err != nil {
- return nil, errors.Wrap(err, "determining upload URL")
+ return nil, fmt.Errorf("determining upload URL: %w", err)
}
return uploadLocation, nil
}()
@@ -213,11 +214,11 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
- return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer to %s", uploadLocation)
+ return types.BlobInfo{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
}
logrus.Debugf("Upload of layer %s complete", blobDigest)
- cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref))
+ options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref))
return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil
}
@@ -238,12 +239,12 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
- return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "checking whether a blob %s exists in %s", digest, repo.Name())
+ return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res))
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
default:
- return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
+ return false, -1, fmt.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
}
}
@@ -272,7 +273,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
uploadLocation, err := res.Location()
if err != nil {
- return errors.Wrap(err, "determining upload URL after a mount attempt")
+ return fmt.Errorf("determining upload URL after a mount attempt: %w", err)
}
logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted())
res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope)
@@ -288,14 +289,14 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
default:
logrus.Debugf("Error mounting, response %#v", *res)
- return errors.Wrapf(registryHTTPResponseToError(res), "mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ return fmt.Errorf("mounting %s from %s to %s: %w", srcDigest, srcRepo.Name(), d.ref.ref.Name(), registryHTTPResponseToError(res))
}
}
// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read.
// The caller must ensure info.Digest is set.
-func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (bool, types.BlobInfo, error) {
+func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, types.BlobInfo, error) {
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
if err != nil {
return false, types.BlobInfo{}, err
@@ -307,22 +308,20 @@ func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info t
return false, types.BlobInfo{}, nil
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
}
// First, check whether the blob happens to already exist at the destination.
- haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, cache)
+ haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
if err != nil {
return false, types.BlobInfo{}, err
}
@@ -331,8 +330,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
}
// Then try reusing blobs from other locations.
- bic := blobinfocache.FromBlobInfoCache(cache)
- candidates := bic.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute)
+ candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute)
for _, candidate := range candidates {
candidateRepo, err := parseBICLocationReference(candidate.Location)
if err != nil {
@@ -386,7 +384,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
}
}
- bic.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
+ options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
if err != nil {
@@ -417,14 +415,14 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
// Double-check that the manifest we've been given matches the digest we've been given.
matches, err := manifest.MatchesDigest(m, *instanceDigest)
if err != nil {
- return errors.Wrapf(err, "digesting manifest in PutManifest")
+ return fmt.Errorf("digesting manifest in PutManifest: %w", err)
}
if !matches {
manifestDigest, merr := manifest.Digest(m)
if merr != nil {
- return errors.Wrapf(err, "Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%v attempting to compute it)", instanceDigest.String(), merr)
+ return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest: %w", instanceDigest.String(), merr)
}
- return errors.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
+ return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
}
} else {
// Compute the digest of the main manifest, or the list if it's a list, so that we
@@ -442,7 +440,12 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
}
}
- path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
+ return d.uploadManifest(ctx, m, refTail)
+}
+
+// uploadManifest writes manifest to tagOrDigest.
+func (d *dockerImageDestination) uploadManifest(ctx context.Context, m []byte, tagOrDigest string) error {
+ path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), tagOrDigest)
headers := map[string][]string{}
mimeType := manifest.GuessMIMEType(m)
@@ -456,7 +459,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
defer res.Body.Close()
if !successStatus(res.StatusCode) {
rawErr := registryHTTPResponseToError(res)
- err := errors.Wrapf(rawErr, "uploading manifest %s to %s", refTail, d.ref.ref.Name())
+ err := fmt.Errorf("uploading manifest %s to %s: %w", tagOrDigest, d.ref.ref.Name(), rawErr)
if isManifestInvalidError(rawErr) {
err = types.ManifestTypeRejectedError{Err: err}
}
@@ -514,38 +517,63 @@ func isManifestInvalidError(err error) bool {
}
}
-// PutSignatures uploads a set of signatures to the relevant lookaside or API extension point.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to upload the signatures for (when
-// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- // Do not fail if we don’t really need to support signatures.
- if len(signatures) == 0 {
- return nil
- }
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *dockerImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
if instanceDigest == nil {
if d.manifestDigest == "" {
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
- return errors.Errorf("Unknown manifest digest, can't add signatures")
+ return errors.New("Unknown manifest digest, can't add signatures")
}
instanceDigest = &d.manifestDigest
}
- if err := d.c.detectProperties(ctx); err != nil {
- return err
+ sigstoreSignatures := []signature.Sigstore{}
+ otherSignatures := []signature.Signature{}
+ for _, sig := range signatures {
+ if sigstoreSig, ok := sig.(signature.Sigstore); ok {
+ sigstoreSignatures = append(sigstoreSignatures, sigstoreSig)
+ } else {
+ otherSignatures = append(otherSignatures, sig)
+ }
}
- switch {
- case d.c.supportsSignatures:
- return d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest)
- case d.c.signatureBase != nil:
- return d.putSignaturesToLookaside(signatures, *instanceDigest)
- default:
- return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+
+ // Only write sigstore signatures to sigstore attachments. We _could_ store them to lookaside
+ // instead, but that would probably be rather surprising.
+ // FIXME: So should we enable sigstores in all cases? Or write in all cases, but opt-in to read?
+
+ if len(sigstoreSignatures) != 0 {
+ if err := d.putSignaturesToSigstoreAttachments(ctx, sigstoreSignatures, *instanceDigest); err != nil {
+ return err
+ }
+ }
+
+ if len(otherSignatures) != 0 {
+ if err := d.c.detectProperties(ctx); err != nil {
+ return err
+ }
+ switch {
+ case d.c.supportsSignatures:
+ if err := d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest); err != nil {
+ return err
+ }
+ case d.c.signatureBase != nil:
+ if err := d.putSignaturesToLookaside(signatures, *instanceDigest); err != nil {
+ return err
+ }
+ default:
+ return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ }
}
+
+ return nil
}
-// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
+// putSignaturesToLookaside implements PutSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
// which is not nil, for a manifest with manifestDigest.
-func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, manifestDigest digest.Digest) error {
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature.Signature, manifestDigest digest.Digest) error {
// FIXME? This overwrites files one at a time, definitely not atomic.
// A failure when updating signatures with a reordered copy could lose some of them.
@@ -556,7 +584,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
// NOTE: Keep this in sync with docs/signature-protocols.md!
for i, signature := range signatures {
- url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
+ url := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
err := d.putOneSignature(url, signature)
if err != nil {
return err
@@ -568,7 +596,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
// is enough for dockerImageSource to stop looking for other signatures, so that
// is sufficient.
for i := len(signatures); ; i++ {
- url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
+ url := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
missing, err := d.c.deleteOneSignature(url)
if err != nil {
return err
@@ -581,9 +609,9 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
return nil
}
-// putOneSignature stores one signature to url.
+// putOneSignature stores sig to url.
// NOTE: Keep this in sync with docs/signature-protocols.md!
-func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
+func (d *dockerImageDestination) putOneSignature(url *url.URL, sig signature.Signature) error {
switch url.Scheme {
case "file":
logrus.Debugf("Writing to %s", url.Path)
@@ -591,17 +619,155 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
if err != nil {
return err
}
- err = os.WriteFile(url.Path, signature, 0644)
+ blob, err := signature.Blob(sig)
+ if err != nil {
+ return err
+ }
+ err = os.WriteFile(url.Path, blob, 0644)
if err != nil {
return err
}
return nil
case "http", "https":
- return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.Redacted())
+ return fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", url.Scheme, url.Redacted())
default:
- return errors.Errorf("Unsupported scheme when writing signature to %s", url.Redacted())
+ return fmt.Errorf("Unsupported scheme when writing signature to %s", url.Redacted())
+ }
+}
+
+func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.Context, signatures []signature.Sigstore, manifestDigest digest.Digest) error {
+ if !d.c.useSigstoreAttachments {
+ return errors.New("writing sigstore attachments is disabled by configuration")
+ }
+
+ ociManifest, err := d.c.getSigstoreAttachmentManifest(ctx, d.ref, manifestDigest)
+ if err != nil {
+ return err
+ }
+ var ociConfig imgspecv1.Image // Most fields empty by default
+ if ociManifest == nil {
+ ociManifest = manifest.OCI1FromComponents(imgspecv1.Descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Digest: "", // We will fill this in later.
+ Size: 0,
+ }, nil)
+ } else {
+ logrus.Debugf("Fetching sigstore attachment config %s", ociManifest.Config.Digest.String())
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs.
+ configBlob, err := d.c.getOCIDescriptorContents(ctx, d.ref, ociManifest.Config, iolimits.MaxConfigBodySize,
+ none.NoCache)
+ if err != nil {
+ return err
+ }
+ if err := json.Unmarshal(configBlob, &ociConfig); err != nil {
+ return fmt.Errorf("parsing sigstore attachment config %s in %s: %w", ociManifest.Config.Digest.String(),
+ d.ref.ref.Name(), err)
+ }
+ }
+
+ for _, sig := range signatures {
+ mimeType := sig.UntrustedMIMEType()
+ payloadBlob := sig.UntrustedPayload()
+ annotations := sig.UntrustedAnnotations()
+
+ alreadyOnRegistry := false
+ for _, layer := range ociManifest.Layers {
+ if layerMatchesSigstoreSignature(layer, mimeType, payloadBlob, annotations) {
+ logrus.Debugf("Signature with digest %s already exists on the registry", layer.Digest.String())
+ alreadyOnRegistry = true
+ break
+ }
+ }
+ if alreadyOnRegistry {
+ continue
+ }
+
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads.
+ // That might eventually need to change if payloads grow to be not just signatures, but something
+ // significantly large.
+ sigDesc, err := d.putBlobBytesAsOCI(ctx, payloadBlob, mimeType, private.PutBlobOptions{
+ Cache: none.NoCache,
+ IsConfig: false,
+ EmptyLayer: false,
+ LayerIndex: nil,
+ })
+ if err != nil {
+ return err
+ }
+ sigDesc.Annotations = annotations
+ ociManifest.Layers = append(ociManifest.Layers, sigDesc)
+ ociConfig.RootFS.DiffIDs = append(ociConfig.RootFS.DiffIDs, sigDesc.Digest)
+ logrus.Debugf("Adding new signature, digest %s", sigDesc.Digest.String())
+ }
+
+ configBlob, err := json.Marshal(ociConfig)
+ if err != nil {
+ return err
}
+ logrus.Debugf("Uploading updated sigstore attachment config")
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs.
+ configDesc, err := d.putBlobBytesAsOCI(ctx, configBlob, imgspecv1.MediaTypeImageConfig, private.PutBlobOptions{
+ Cache: none.NoCache,
+ IsConfig: true,
+ EmptyLayer: false,
+ LayerIndex: nil,
+ })
+ if err != nil {
+ return err
+ }
+ ociManifest.Config = configDesc
+
+ manifestBlob, err := ociManifest.Serialize()
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Uploading sigstore attachment manifest")
+ return d.uploadManifest(ctx, manifestBlob, sigstoreAttachmentTag(manifestDigest))
+}
+
+func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,
+ payloadBlob []byte, annotations map[string]string) bool {
+ if layer.MediaType != mimeType ||
+ layer.Size != int64(len(payloadBlob)) ||
+ // This is not quite correct, we should use the layer’s digest algorithm.
+ // But right now we don’t want to deal with corner cases like bad digest formats
+ // or unavailable algorithms; in the worst case we end up with duplicate signature
+ // entries.
+ layer.Digest.String() != digest.FromBytes(payloadBlob).String() {
+ return false
+ }
+ if len(layer.Annotations) != len(annotations) {
+ return false
+ }
+ for k, v1 := range layer.Annotations {
+ if v2, ok := annotations[k]; !ok || v1 != v2 {
+ return false
+ }
+ }
+ // All annotations in layer exist in sig, and the number of annotations is the same, so all annotations
+ // in sig also exist in layer.
+ return true
+}
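layerMatchesSigstoreSignature compares annotation maps by length plus one-sided containment, which together imply equality, as the closing comment argues. Isolated, the check is:

package main

import "fmt"

// equalAnnotations reports whether two string maps hold exactly the same entries.
func equalAnnotations(a, b map[string]string) bool {
	if len(a) != len(b) {
		return false
	}
	for k, v := range a {
		if bv, ok := b[k]; !ok || bv != v {
			return false
		}
	}
	// Every key of a is in b with the same value, and the sizes match,
	// so b cannot contain extra keys.
	return true
}

func main() {
	fmt.Println(equalAnnotations(map[string]string{"k": "v"}, map[string]string{"k": "v"})) // true
}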
+
+// putBlobBytesAsOCI uploads a blob with the specified contents, and returns an appropriate
+// OCI descriptor.
+func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents []byte, mimeType string, options private.PutBlobOptions) (imgspecv1.Descriptor, error) {
+ blobDigest := digest.FromBytes(contents)
+ info, err := d.PutBlobWithOptions(ctx, bytes.NewReader(contents),
+ types.BlobInfo{
+ Digest: blobDigest,
+ Size: int64(len(contents)),
+ MediaType: mimeType,
+ }, options)
+ if err != nil {
+ return imgspecv1.Descriptor{}, fmt.Errorf("writing blob %s: %w", blobDigest.String(), err)
+ }
+ return imgspecv1.Descriptor{
+ MediaType: mimeType,
+ Digest: info.Digest,
+ Size: info.Size,
+ }, nil
}
// deleteOneSignature deletes a signature from url, if it exists.
@@ -618,15 +784,15 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error
return false, err
case "http", "https":
- return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.Redacted())
+ return false, fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", url.Scheme, url.Redacted())
default:
- return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.Redacted())
+ return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", url.Redacted())
}
}
-// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension,
+// putSignaturesToAPIExtension implements PutSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension,
// for a manifest with manifestDigest.
-func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, manifestDigest digest.Digest) error {
+func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures []signature.Signature, manifestDigest digest.Digest) error {
// Skip dealing with the manifest digest, or reading the old state, if not necessary.
if len(signatures) == 0 {
return nil
@@ -646,7 +812,13 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
}
sigExists:
- for _, newSig := range signatures {
+ for _, newSigWithFormat := range signatures {
+ newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
+ if !ok {
+ return signature.UnsupportedFormatError(newSigWithFormat)
+ }
+ newSig := newSigSimple.UntrustedSignature()
+
for _, existingSig := range existingSignatures.Signatures {
if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
continue sigExists
@@ -659,7 +831,7 @@ sigExists:
randBytes := make([]byte, 16)
n, err := rand.Read(randBytes)
if err != nil || n != 16 {
- return errors.Wrapf(err, "generating random signature len %d", n)
+ return fmt.Errorf("generating random signature len %d: %w", n, err)
}
signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
@@ -685,7 +857,7 @@ sigExists:
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
- return errors.Wrapf(registryHTTPResponseToError(res), "uploading signature to %s in %s", path, d.c.registry)
+ return fmt.Errorf("uploading signature to %s in %s: %w", path, d.c.registry, registryHTTPResponseToError(res))
}
}
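The annotation comparison in the sigstore layer-matching code above depends on a small invariant worth spelling out: checking map lengths plus containment in one direction is enough to establish two-directional equality. A minimal self-contained sketch of that reasoning (the annotation key and values are illustrative, not taken from c/image):

package main

import "fmt"

// annotationsEqual reports whether two annotation maps hold exactly the same
// key/value pairs: equal lengths plus one-directional containment imply
// equality in both directions.
func annotationsEqual(a, b map[string]string) bool {
	if len(a) != len(b) {
		return false
	}
	for k, v := range a {
		if v2, ok := b[k]; !ok || v != v2 {
			return false
		}
	}
	return true
}

func main() {
	x := map[string]string{"dev.cosignproject.cosign/signature": "MEQCIA=="}
	y := map[string]string{"dev.cosignproject.cosign/signature": "MEQCIA=="}
	fmt.Println(annotationsEqual(x, y)) // true
}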
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index 27fb838f5..b0e877971 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -2,6 +2,7 @@ package docker
import (
"context"
+ "errors"
"fmt"
"io"
"mime"
@@ -10,22 +11,29 @@ import (
"net/url"
"os"
"regexp"
- "strconv"
"strings"
"sync"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type dockerImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.ImplementsGetBlobAt
+
logicalRef dockerReference // The reference the user requested.
physicalRef dockerReference // The actual reference we are accessing (possibly a mirror)
c *dockerClient
@@ -37,9 +45,13 @@ type dockerImageSource struct {
// newImageSource creates a new ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
if err != nil {
- return nil, errors.Wrapf(err, "loading registries configuration")
+ return nil, fmt.Errorf("loading registries configuration: %w", err)
}
if registry == nil {
// No configuration was found for the provided reference, so use the
@@ -72,7 +84,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
} else {
logrus.Debugf("Trying to access %q", pullSource.Reference)
}
- s, err := newImageSourceAttempt(ctx, sys, ref, pullSource)
+ s, err := newImageSourceAttempt(ctx, sys, ref, pullSource, registryConfig)
if err == nil {
return s, nil
}
@@ -96,14 +108,15 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
// The paired [] at least have some chance of being unambiguous.
extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err))
}
- return nil, errors.Wrapf(primary.err, "(Mirrors also failed: %s): %s", strings.Join(extras, "\n"), primary.ref.String())
+ return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err)
}
}
// newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource.
// Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable.
// The caller must call .Close() on the returned ImageSource.
-func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource) (*dockerImageSource, error) {
+func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource,
+ registryConfig *registryConfiguration) (*dockerImageSource, error) {
physicalRef, err := newReference(pullSource.Reference)
if err != nil {
return nil, err
@@ -118,17 +131,22 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logica
endpointSys = &copy
}
- client, err := newDockerClientFromRef(endpointSys, physicalRef, false, "pull")
+ client, err := newDockerClientFromRef(endpointSys, physicalRef, registryConfig, false, "pull")
if err != nil {
return nil, err
}
client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
s := &dockerImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+
logicalRef: logicalRef,
physicalRef: physicalRef,
c: client,
}
+ s.Compat = impl.AddCompat(s)
if err := s.ensureManifestIsLoaded(ctx); err != nil {
return nil, err
@@ -147,23 +165,6 @@ func (s *dockerImageSource) Close() error {
return nil
}
-// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
-func (s *dockerImageSource) SupportsGetBlobAt() bool {
- return true
-}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *dockerImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
-
// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
func simplifyContentType(contentType string) string {
@@ -193,25 +194,7 @@ func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *dig
}
func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
- path := fmt.Sprintf(manifestPath, reference.Path(s.physicalRef.ref), tagOrDigest)
- headers := map[string][]string{
- "Accept": manifest.DefaultRequestedManifestMIMETypes,
- }
- res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
- if err != nil {
- return nil, "", err
- }
- logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
- defer res.Body.Close()
- if res.StatusCode != http.StatusOK {
- return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
- }
-
- manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
- if err != nil {
- return nil, "", err
- }
- return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
+ return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest)
}
// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
@@ -241,58 +224,6 @@ func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
return nil
}
-// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty.
-// This function can return nil reader when no url is supported by this function. In this case, the caller
-// should fallback to fetch the non-external blob (i.e. pull from the registry).
-func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
- var (
- resp *http.Response
- err error
- )
- if len(urls) == 0 {
- return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
- }
- for _, u := range urls {
- url, err := url.Parse(u)
- if err != nil || (url.Scheme != "http" && url.Scheme != "https") {
- continue // unsupported url. skip this url.
- }
- // NOTE: we must not authenticate on additional URLs as those
- // can be abused to leak credentials or tokens. Please
- // refer to CVE-2020-15157 for more information.
- resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
- if err == nil {
- if resp.StatusCode != http.StatusOK {
- err = errors.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
- logrus.Debug(err)
- resp.Body.Close()
- continue
- }
- break
- }
- }
- if resp == nil && err == nil {
- return nil, 0, nil // fallback to non-external blob
- }
- if err != nil {
- return nil, 0, err
- }
- return resp.Body, getBlobSize(resp), nil
-}
-
-func getBlobSize(resp *http.Response) int64 {
- size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
- if err != nil {
- size = -1
- }
- return size
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
- return true
-}
-
// splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks
func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) {
defer close(streams)
@@ -337,7 +268,7 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
}
boundary, found := params["boundary"]
if !found {
- errs <- errors.Errorf("could not find boundary")
+ errs <- errors.New("could not find boundary")
body.Close()
return
}
@@ -352,7 +283,7 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
errs <- err
}
if parts != len(chunks) {
- errs <- errors.Errorf("invalid number of chunks returned by the server")
+ errs <- errors.New("invalid number of chunks returned by the server")
}
return
}
@@ -443,7 +374,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
default:
err := httpResponseToError(res, "Error fetching partial blob")
if err == nil {
- err = errors.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+ err = fmt.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
}
res.Body.Close()
return nil, nil, err
@@ -454,45 +385,41 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
- if len(info.URLs) != 0 {
- r, s, err := s.getExternalBlob(ctx, info.URLs)
- if err != nil {
- return nil, 0, err
- } else if r != nil {
- return r, s, nil
- }
- }
-
- path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
- logrus.Debugf("Downloading %s", path)
- res, err := s.c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
- if err != nil {
- return nil, 0, err
- }
- if err := httpResponseToError(res, "Error fetching blob"); err != nil {
- res.Body.Close()
- return nil, 0, err
- }
- cache.RecordKnownLocation(s.physicalRef.Transport(), bicTransportScope(s.physicalRef), info.Digest, newBICLocationReference(s.physicalRef))
- return res.Body, getBlobSize(res), nil
+ return s.c.getBlob(ctx, s.physicalRef, info, cache)
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
-func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
if err := s.c.detectProperties(ctx); err != nil {
return nil, err
}
+ var res []signature.Signature
switch {
case s.c.supportsSignatures:
- return s.getSignaturesFromAPIExtension(ctx, instanceDigest)
+ sigs, err := s.getSignaturesFromAPIExtension(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, sigs...)
case s.c.signatureBase != nil:
- return s.getSignaturesFromLookaside(ctx, instanceDigest)
+ sigs, err := s.getSignaturesFromLookaside(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, sigs...)
default:
- return nil, errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ }
+
+ sigstoreSigs, err := s.getSignaturesFromSigstoreAttachments(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
}
+ res = append(res, sigstoreSigs...)
+ return res, nil
}
// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference,
@@ -513,18 +440,18 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *
return manifest.Digest(s.cachedManifest)
}
-// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase,
+// getSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
// which is not nil.
-func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
if err != nil {
return nil, err
}
// NOTE: Keep this in sync with docs/signature-protocols.md!
- signatures := [][]byte{}
+ signatures := []signature.Signature{}
for i := 0; ; i++ {
- url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
+ url := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
signature, missing, err := s.getOneSignature(ctx, url)
if err != nil {
return nil, err
@@ -537,20 +464,24 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst
return signatures, nil
}
-// getOneSignature downloads one signature from url.
-// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
+// getOneSignature downloads one signature from url, and returns (signature, false, nil) on success.
+// If it successfully determines that the signature does not exist, returns (nil, true, nil).
// NOTE: Keep this in sync with docs/signature-protocols.md!
-func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) {
+func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature.Signature, bool, error) {
switch url.Scheme {
case "file":
logrus.Debugf("Reading %s", url.Path)
- sig, err := os.ReadFile(url.Path)
+ sigBlob, err := os.ReadFile(url.Path)
if err != nil {
if os.IsNotExist(err) {
return nil, true, nil
}
return nil, false, err
}
+ sig, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, false, fmt.Errorf("parsing signature %q: %w", url.Path, err)
+ }
return sig, false, nil
case "http", "https":
@@ -567,21 +498,25 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
if res.StatusCode == http.StatusNotFound {
return nil, true, nil
} else if res.StatusCode != http.StatusOK {
- return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.Redacted(), res.StatusCode, http.StatusText(res.StatusCode))
+ return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", url.Redacted(), res.StatusCode, http.StatusText(res.StatusCode))
}
- sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize)
+ sigBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize)
if err != nil {
return nil, false, err
}
+ sig, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, false, fmt.Errorf("parsing signature %s: %w", url.Redacted(), err)
+ }
return sig, false, nil
default:
- return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.Redacted())
+ return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", url.Redacted())
}
}
-// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension.
-func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+// getSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension.
+func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
if err != nil {
return nil, err
@@ -592,17 +527,59 @@ func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, i
return nil, err
}
- var sigs [][]byte
+ var sigs []signature.Signature
for _, sig := range parsedBody.Signatures {
if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
- sigs = append(sigs, sig.Content)
+ sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content))
}
}
return sigs, nil
}
+func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ if !s.c.useSigstoreAttachments {
+ logrus.Debugf("Not looking for sigstore attachments: disabled by configuration")
+ return nil, nil
+ }
+
+ manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest)
+ if err != nil {
+ return nil, err
+ }
+ if ociManifest == nil {
+ return nil, nil
+ }
+
+ logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers))
+ res := []signature.Signature{}
+ for layerIndex, layer := range ociManifest.Layers {
+ // Note that this copies all kinds of attachments: attestations, and whatever else is there,
+ // not just signatures. We leave the signature consumers to decide based on the MIME type.
+ logrus.Debugf("Fetching sigstore attachment %d/%d: %s", layerIndex+1, len(ociManifest.Layers), layer.Digest.String())
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads.
+ // That might eventually need to change if payloads grow to be not just signatures, but something
+ // significantly large.
+ payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize,
+ none.NoCache)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations))
+ }
+ return res, nil
+}
+
// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return err
+ }
// docker/distribution does not document what action should be used for deleting images.
//
// Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it.
@@ -610,7 +587,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
// OpenShift ignores the action string (both the password and the token are OpenShift API tokens identifying a user).
//
// We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything".
- c, err := newDockerClientFromRef(sys, ref, true, "*")
+ c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "*")
if err != nil {
return err
}
@@ -635,9 +612,9 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
switch get.StatusCode {
case http.StatusOK:
case http.StatusNotFound:
- return errors.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
+ return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
default:
- return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
+ return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
}
manifestDigest, err := manifest.Digest(manifestBody)
@@ -659,11 +636,11 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
return err
}
if delete.StatusCode != http.StatusAccepted {
- return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
+ return fmt.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
}
for i := 0; ; i++ {
- url := signatureStorageURL(c.signatureBase, manifestDigest, i)
+ url := lookasideStorageURL(c.signatureBase, manifestDigest, i)
missing, err := c.deleteOneSignature(url)
if err != nil {
return err
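A change recurring throughout this file (and the rest of the vendored tree) is the switch from github.com/pkg/errors to the standard library. The property that makes this a safe swap is that fmt.Errorf with %w keeps the error chain inspectable, just as errors.Wrapf did. A minimal sketch, with a hypothetical path:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func readSignature(path string) ([]byte, error) {
	blob, err := os.ReadFile(path)
	if err != nil {
		// %w wraps the underlying error, so callers can still use
		// errors.Is / errors.As on the result.
		return nil, fmt.Errorf("reading signature %q: %w", path, err)
	}
	return blob, nil
}

func main() {
	_, err := readSignature("/definitely/missing")
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
}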
diff --git a/vendor/github.com/containers/image/v5/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go
index 541e053f3..0544bb3c9 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_transport.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go
@@ -2,6 +2,7 @@ package docker
import (
"context"
+ "errors"
"fmt"
"strings"
@@ -9,7 +10,6 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
func init() {
@@ -49,7 +49,7 @@ type dockerReference struct {
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
if !strings.HasPrefix(refString, "//") {
- return nil, errors.Errorf("docker: image reference %s does not start with //", refString)
+ return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
}
ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
if err != nil {
@@ -67,7 +67,7 @@ func NewReference(ref reference.Named) (types.ImageReference, error) {
// newReference returns a dockerReference for a named reference.
func newReference(ref reference.Named) (dockerReference, error) {
if reference.IsNameOnly(ref) {
- return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+ return dockerReference{}, fmt.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
}
// A github.com/distribution/reference value can have a tag and a digest at the same time!
// The docker/distribution API does not really support that (we can’t ask for an image with a specific
@@ -77,7 +77,7 @@ func newReference(ref reference.Named) (dockerReference, error) {
_, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical)
if isTagged && isDigested {
- return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported")
+ return dockerReference{}, errors.New("Docker references with both a tag and digest are currently not supported")
}
return dockerReference{
@@ -164,5 +164,5 @@ func (ref dockerReference) tagOrDigest() (string, error) {
return ref.Tag(), nil
}
// This should not happen, NewReference above refuses reference.IsNameOnly values.
- return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
+ return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
}
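To illustrate the reference rules enforced above (the "//" prefix check in ParseReference, and the tag-or-digest requirement in newReference), a short usage sketch against the exported API; the image name is only an example:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
)

func main() {
	// Valid: a transport-relative reference with a tag.
	ref, err := docker.ParseReference("//quay.io/podman/stable:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport())

	// Invalid: without the leading "//" ParseReference returns the
	// "does not start with //" error shown in the hunk above.
	_, err = docker.ParseReference("quay.io/podman/stable:latest")
	fmt.Println(err)
}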
diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go
index 6f707db7d..79590c4c7 100644
--- a/vendor/github.com/containers/image/v5/docker/errors.go
+++ b/vendor/github.com/containers/image/v5/docker/errors.go
@@ -6,7 +6,6 @@ import (
"net/http"
"github.com/docker/distribution/registry/client"
- perrors "github.com/pkg/errors"
)
var (
@@ -42,7 +41,7 @@ func httpResponseToError(res *http.Response, context string) error {
if context != "" {
context = context + ": "
}
- return perrors.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
+ return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
}
}
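The rewritten branch above is the generic fallback for unexpected registry status codes. A reduced standalone sketch of the same mapping, with hypothetical inputs:

package main

import (
	"fmt"
	"net/http"
)

// statusToError prefixes an optional context string and includes the
// human-readable status text, mirroring the fallback branch above.
func statusToError(statusCode int, context string) error {
	if context != "" {
		context = context + ": "
	}
	return fmt.Errorf("%sinvalid status code from registry %d (%s)",
		context, statusCode, http.StatusText(statusCode))
}

func main() {
	fmt.Println(statusToError(http.StatusTeapot, "Error fetching blob"))
}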
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
index 7e1580990..9a0ea683e 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
@@ -4,20 +4,29 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
+ "fmt"
"io"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/streamdigest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
-// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
+// Destination is a partial implementation of private.ImageDestination for writing to an io.Writer.
type Destination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.NoSignaturesInitialize
+
archive *Writer
repoTags []reference.NamedTagged
// Other state.
@@ -26,16 +35,34 @@ type Destination struct {
}
// NewDestination returns a tarfile.Destination adding images to the specified Writer.
-func NewDestination(sys *types.SystemContext, archive *Writer, ref reference.NamedTagged) *Destination {
+func NewDestination(sys *types.SystemContext, archive *Writer, transportName string, ref reference.NamedTagged) *Destination {
repoTags := []reference.NamedTagged{}
if ref != nil {
repoTags = append(repoTags, ref)
}
- return &Destination{
+ dest := &Destination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{
+ manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+ },
+ DesiredLayerCompression: types.Decompress,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
+ // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
+ // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t
+ // much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
+ HasThreadSafePutBlob: false,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartialRaw(transportName),
+ NoSignaturesInitialize: stubs.NoSignatures("Storing signatures for docker tar files is not supported"),
+
archive: archive,
repoTags: repoTags,
sysCtx: sys,
}
+ dest.Compat = impl.AddCompat(dest)
+ return dest
}
// AddRepoTags adds the specified tags to the destination's repoTags.
@@ -43,54 +70,14 @@ func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
d.repoTags = append(d.repoTags, tags...)
}
-// SupportedManifestMIMETypes tells which manifest mime types the destination supports
-// If an empty slice or nil it's returned, then any mime type can be tried to upload
-func (d *Destination) SupportedManifestMIMETypes() []string {
- return []string{
- manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
- }
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *Destination) SupportsSignatures(ctx context.Context) error {
- return errors.Errorf("Storing signatures for docker tar files is not supported")
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *Destination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *Destination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *Destination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *Destination) HasThreadSafePutBlob() bool {
- // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
- // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t
- // much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
+// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest == "" {
@@ -118,14 +105,14 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
return reusedInfo, nil
}
- if isConfig {
+ if options.IsConfig {
buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "reading Config file stream")
+ return types.BlobInfo{}, fmt.Errorf("reading Config file stream: %w", err)
}
d.config = buf
if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "writing Config file")
+ return types.BlobInfo{}, fmt.Errorf("writing Config file: %w", err)
}
} else {
if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
@@ -136,16 +123,14 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if err := d.archive.lock(); err != nil {
return false, types.BlobInfo{}, err
}
@@ -168,10 +153,10 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest
// so the caller trying a different manifest kind would be pointless.
var man manifest.Schema2
if err := json.Unmarshal(m, &man); err != nil {
- return errors.Wrap(err, "parsing manifest")
+ return fmt.Errorf("parsing manifest: %w", err)
}
if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
- return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+ return errors.New("Unsupported manifest type, need a Docker schema 2 manifest")
}
if err := d.archive.lock(); err != nil {
@@ -185,16 +170,3 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest
return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
}
-
-// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
-// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
-// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
-func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- if instanceDigest != nil {
- return errors.Errorf(`Manifest lists are not supported for docker tar files`)
- }
- if len(signatures) != 0 {
- return errors.Errorf("Storing signatures for docker tar files is not supported")
- }
- return nil
-}
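The impl and stubs packages used above are internal to c/image and cannot be imported from outside the module; the following reduced sketch only illustrates the composition pattern this diff adopts: per-transport property methods are promoted from an embedded, value-initialized helper instead of being hand-written on every destination type. All names here are simplified stand-ins:

package main

import "fmt"

// Properties collects per-transport answers that used to be individual
// hand-written methods.
type Properties struct {
	HasThreadSafePutBlob bool
}

// PropertyMethodsInitialize provides the getter methods for Properties.
type PropertyMethodsInitialize struct {
	vals Properties
}

func PropertyMethods(vals Properties) PropertyMethodsInitialize {
	return PropertyMethodsInitialize{vals: vals}
}

func (o PropertyMethodsInitialize) HasThreadSafePutBlob() bool {
	return o.vals.HasThreadSafePutBlob
}

// Destination embeds the initializer, promoting HasThreadSafePutBlob()
// onto the destination type itself.
type Destination struct {
	PropertyMethodsInitialize
}

func main() {
	d := Destination{
		PropertyMethodsInitialize: PropertyMethods(Properties{
			HasThreadSafePutBlob: false,
		}),
	}
	fmt.Println(d.HasThreadSafePutBlob()) // false
}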
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
index c77c002d1..eec7b84e5 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
@@ -3,6 +3,8 @@ package tarfile
import (
"archive/tar"
"encoding/json"
+ "errors"
+ "fmt"
"io"
"os"
"path"
@@ -12,7 +14,6 @@ import (
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// Reader is a ((docker save)-formatted) tar archive that allows random access to any component.
@@ -29,7 +30,7 @@ type Reader struct {
func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
file, err := os.Open(path)
if err != nil {
- return nil, errors.Wrapf(err, "opening file %q", path)
+ return nil, fmt.Errorf("opening file %q: %w", path, err)
}
defer file.Close()
@@ -37,7 +38,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
// as a source. Otherwise we pass the stream to NewReaderFromStream.
stream, isCompressed, err := compression.AutoDecompress(file)
if err != nil {
- return nil, errors.Wrapf(err, "detecting compression for file %q", path)
+ return nil, fmt.Errorf("detecting compression for file %q: %w", path, err)
}
defer stream.Close()
if !isCompressed {
@@ -54,7 +55,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
// Save inputStream to a temporary file
tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
if err != nil {
- return nil, errors.Wrap(err, "creating temporary file")
+ return nil, fmt.Errorf("creating temporary file: %w", err)
}
defer tarCopyFile.Close()
@@ -70,7 +71,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
// giving users really confusing "invalid tar header" errors).
uncompressedStream, _, err := compression.AutoDecompress(inputStream)
if err != nil {
- return nil, errors.Wrap(err, "auto-decompressing input")
+ return nil, fmt.Errorf("auto-decompressing input: %w", err)
}
defer uncompressedStream.Close()
@@ -79,7 +80,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
// TODO: This can take quite some time, and should ideally be cancellable
// using a context.Context.
if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
- return nil, errors.Wrapf(err, "copying contents to temporary file %q", tarCopyFile.Name())
+ return nil, fmt.Errorf("copying contents to temporary file %q: %w", tarCopyFile.Name(), err)
}
succeeded = true
@@ -112,7 +113,7 @@ func newReader(path string, removeOnClose bool) (*Reader, error) {
return nil, err
}
if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
- return nil, errors.Wrap(err, "decoding tar manifest.json")
+ return nil, fmt.Errorf("decoding tar manifest.json: %w", err)
}
succeeded = true
@@ -136,7 +137,7 @@ func (r *Reader) Close() error {
func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) {
switch {
case ref != nil && sourceIndex != -1:
- return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d",
+ return nil, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d",
ref.String(), sourceIndex)
case ref != nil:
@@ -145,25 +146,25 @@ func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int)
for tagIndex, tag := range r.Manifest[i].RepoTags {
parsedTag, err := reference.ParseNormalizedNamed(tag)
if err != nil {
- return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i)
+ return nil, -1, fmt.Errorf("Invalid tag %#v in manifest.json item @%d: %w", tag, i, err)
}
if parsedTag.String() == refString {
return &r.Manifest[i], tagIndex, nil
}
}
}
- return nil, -1, errors.Errorf("Tag %#v not found", refString)
+ return nil, -1, fmt.Errorf("Tag %#v not found", refString)
case sourceIndex != -1:
if sourceIndex >= len(r.Manifest) {
- return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available",
+ return nil, -1, fmt.Errorf("Invalid source index @%d, only %d manifest items available",
sourceIndex, len(r.Manifest))
}
return &r.Manifest[sourceIndex], -1, nil
default:
if len(r.Manifest) != 1 {
- return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
+ return nil, -1, fmt.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
}
return &r.Manifest[0], -1, nil
}
@@ -226,7 +227,7 @@ func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
}
if !header.FileInfo().Mode().IsRegular() {
- return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+ return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
}
succeeded = true
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
@@ -257,7 +258,7 @@ func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *
func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
file, err := r.openTarComponent(path)
if err != nil {
- return nil, errors.Wrapf(err, "loading tar component %s", path)
+ return nil, fmt.Errorf("loading tar component %s: %w", path, err)
}
defer file.Close()
bytes, err := iolimits.ReadAtMost(file, limit)
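NewReaderFromFile above leans on compression.AutoDecompress to accept both plain and compressed (docker save)-style archives through one code path. A usage sketch of that exported helper; the file name is hypothetical:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	f, err := os.Open("image.tar.gz") // hypothetical docker-save output
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// AutoDecompress sniffs the stream: for compressed input it returns a
	// decompressing reader and isCompressed=true; otherwise it returns the
	// stream unchanged.
	stream, isCompressed, err := compression.AutoDecompress(f)
	if err != nil {
		panic(err)
	}
	defer stream.Close()

	n, err := io.Copy(io.Discard, stream)
	if err != nil {
		panic(err)
	}
	fmt.Printf("compressed=%v uncompressedBytes=%d\n", isCompressed, n)
}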
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
index 8e9be17c1..b63b5316e 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
@@ -5,22 +5,31 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
+ "fmt"
"io"
"os"
"path"
"sync"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
archive *Reader
closeArchive bool // .Close() the archive when the source is closed.
// If ref is nil and sourceIndex is -1, indicates the only image in the archive.
@@ -46,13 +55,20 @@ type layerInfo struct {
// NewSource returns a tarfile.Source for an image in the specified archive matching ref
// and sourceIndex (or the only image if they are (nil, -1)).
// The archive will be closed if closeArchive
-func NewSource(archive *Reader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *Source {
- return &Source{
+func NewSource(archive *Reader, closeArchive bool, transportName string, ref reference.NamedTagged, sourceIndex int) *Source {
+ s := &Source{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAtRaw(transportName),
+
archive: archive,
closeArchive: closeArchive,
ref: ref,
sourceIndex: sourceIndex,
}
+ s.Compat = impl.AddCompat(s)
+ return s
}
// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
@@ -79,10 +95,10 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
}
var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
- return errors.Wrapf(err, "decoding tar config %s", tarManifest.Config)
+ return fmt.Errorf("decoding tar config %s: %w", tarManifest.Config, err)
}
if parsedConfig.RootFS == nil {
- return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
+ return fmt.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
}
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
@@ -115,7 +131,7 @@ func (s *Source) TarManifest() []ManifestItem {
func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
// Collect layer data available in manifest and config.
if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
- return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
+ return nil, fmt.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
}
knownLayers := map[digest.Digest]*layerInfo{}
unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
@@ -128,7 +144,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
}
layerPath := path.Clean(tarManifest.Layers[i])
if _, ok := unknownLayerSizes[layerPath]; ok {
- return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
+ return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
}
li := &layerInfo{ // A new element in each iteration
path: layerPath,
@@ -163,7 +179,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
// the slower method of checking if it's compressed.
uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
if err != nil {
- return nil, errors.Wrapf(err, "auto-decompressing %s to determine its size", layerPath)
+ return nil, fmt.Errorf("auto-decompressing %s to determine its size: %w", layerPath, err)
}
defer uncompressedStream.Close()
@@ -171,7 +187,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
if isCompressed {
uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
if err != nil {
- return nil, errors.Wrapf(err, "reading %s to find its size", layerPath)
+ return nil, fmt.Errorf("reading %s to find its size: %w", layerPath, err)
}
}
li.size = uncompressedSize
@@ -179,7 +195,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
}
}
if len(unknownLayerSizes) != 0 {
- return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
+ return nil, errors.New("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
}
return knownLayers, nil
@@ -213,7 +229,7 @@ func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest)
for _, diffID := range s.orderedDiffIDList {
li, ok := s.knownLayers[diffID]
if !ok {
- return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
+ return nil, "", fmt.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
}
m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
Digest: diffID, // diffID is a digest of the uncompressed tarball
@@ -248,11 +264,6 @@ func (r uncompressedReadCloser) Close() error {
return res
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *Source) HasThreadSafeGetBlob() bool {
- return true
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -291,7 +302,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
if err != nil {
- return nil, 0, errors.Wrapf(err, "auto-decompressing blob %s", info.Digest)
+ return nil, 0, fmt.Errorf("auto-decompressing blob %s: %w", info.Digest, err)
}
newStream := uncompressedReadCloser{
@@ -304,27 +315,5 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
return newStream, li.size, nil
}
- return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
-}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as there can be no secondary manifests.
-func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
- return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
- }
- return [][]byte{}, nil
-}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as the primary manifest can not be a list, so there can be no secondary manifests.
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
+ return nil, 0, fmt.Errorf("Unknown blob %s", info.Digest)
}
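prepareLayerData (earlier in this file) decompresses layers precisely because the DiffIDs recorded in the tar manifest are digests of the uncompressed layer tarball, not of the compressed blob. A sketch of computing such a digest with go-digest; the input string is a stand-in for real layer bytes:

package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	// digest.Canonical is sha256; a layer's DiffID is this digest computed
	// over the uncompressed layer tarball.
	d, err := digest.Canonical.FromReader(strings.NewReader("uncompressed layer bytes"))
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // sha256:...
}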
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
index 255f0d354..f6ee041c4 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
@@ -4,6 +4,7 @@ import (
"archive/tar"
"bytes"
"encoding/json"
+ "errors"
"fmt"
"io"
"os"
@@ -15,7 +16,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -72,7 +72,7 @@ func (w *Writer) unlock() {
// The caller must have locked the Writer.
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+ return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
}
if blob, ok := w.blobs[info.Digest]; ok {
return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
@@ -94,16 +94,16 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
// See also the comment in physicalLayerPath.
physicalLayerPath := w.physicalLayerPath(layerDigest)
if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
- return errors.Wrap(err, "creating layer symbolic link")
+ return fmt.Errorf("creating layer symbolic link: %w", err)
}
b := []byte("1.0")
if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
- return errors.Wrap(err, "writing VERSION file")
+ return fmt.Errorf("writing VERSION file: %w", err)
}
if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
- return errors.Wrap(err, "writing config json file")
+ return fmt.Errorf("writing config json file: %w", err)
}
w.legacyLayers[layerID] = struct{}{}
@@ -128,7 +128,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
var config map[string]*json.RawMessage
err := json.Unmarshal(configBytes, &config)
if err != nil {
- return errors.Wrap(err, "unmarshaling config")
+ return fmt.Errorf("unmarshaling config: %w", err)
}
for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
layerConfig[attr] = config[attr]
@@ -152,7 +152,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
layerConfig["layer_id"] = chainID
b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point.
if err != nil {
- return errors.Wrap(err, "marshaling layer config")
+ return fmt.Errorf("marshaling layer config: %w", err)
}
delete(layerConfig, "layer_id")
layerID := digest.Canonical.FromBytes(b).Hex()
@@ -160,7 +160,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
configBytes, err := json.Marshal(layerConfig)
if err != nil {
- return errors.Wrap(err, "marshaling layer config")
+ return fmt.Errorf("marshaling layer config: %w", err)
}
if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil {
@@ -280,10 +280,10 @@ func (w *Writer) Close() error {
b, err = json.Marshal(w.repositories)
if err != nil {
- return errors.Wrap(err, "marshaling repositories")
+ return fmt.Errorf("marshaling repositories: %w", err)
}
if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
- return errors.Wrap(err, "writing config json file")
+ return fmt.Errorf("writing config json file: %w", err)
}
if err := w.tar.Close(); err != nil {
@@ -375,7 +375,7 @@ func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reade
return err
}
if size != expectedSize {
- return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+ return fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
}
return nil
}
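writeLegacyMetadataLocked above derives deterministic legacy layer IDs: it marshals the layer config with "layer_id" temporarily set to the chain ID (and "id" not yet set), hashes the bytes, and uses the hex digest as the ID. A reduced sketch of that derivation; the config contents are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// A stand-in layer config; in the real code the fields are copied from
	// the image config before "layer_id" is mixed in.
	layerConfig := map[string]interface{}{
		"architecture": "amd64",
		"layer_id":     "sha256:0123456789abcdef",
	}
	b, err := json.Marshal(layerConfig)
	if err != nil {
		panic(err)
	}
	// The hex of the canonical digest of the marshaled config becomes the
	// deterministic legacy layer ID.
	layerID := digest.Canonical.FromBytes(b).Hex()
	fmt.Println(layerID)
}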
diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
index 94e9e5f23..5d42c3870 100644
--- a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
+++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
@@ -1,10 +1,11 @@
package policyconfiguration
import (
+ "errors"
+ "fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
- "github.com/pkg/errors"
)
// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
@@ -16,9 +17,9 @@ func DockerReferenceIdentity(ref reference.Named) (string, error) {
digested, isDigested := ref.(reference.Canonical)
switch {
case isTagged && isDigested: // Note that this CAN actually happen.
- return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
+ return "", fmt.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
- return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
+ return "", fmt.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
case isTagged:
res = res + ":" + tagged.Tag()
case isDigested:
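As a usage sketch of the function touched above: for a tagged reference the policy identity is the fully-qualified name plus tag, while digested references yield name@digest. The image name is only an example:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/policyconfiguration"
	"github.com/containers/image/v5/docker/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("quay.io/podman/stable:latest")
	if err != nil {
		panic(err)
	}
	id, err := policyconfiguration.DockerReferenceIdentity(ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // quay.io/podman/stable:latest
}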
diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/registries_d.go
index 3294d7def..37087dd85 100644
--- a/vendor/github.com/containers/image/v5/docker/lookaside.go
+++ b/vendor/github.com/containers/image/v5/docker/registries_d.go
@@ -1,6 +1,7 @@
package docker
import (
+ "errors"
"fmt"
"net/url"
"os"
@@ -14,7 +15,6 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/ghodss/yaml"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -30,10 +30,10 @@ const builtinRegistriesDirPath = etcDir + "/containers/registries.d"
// userRegistriesDirPath is the path to the per user registries.d.
var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")
-// defaultUserDockerDir is the default sigstore directory for unprivileged user
+// defaultUserDockerDir is the default lookaside directory for an unprivileged user
var defaultUserDockerDir = filepath.FromSlash(".local/share/containers/sigstore")
-// defaultDockerDir is the default sigstore directory for root
+// defaultDockerDir is the default lookaside directory for root
var defaultDockerDir = "/var/lib/containers/sigstore"
// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
@@ -46,51 +46,39 @@ type registryConfiguration struct {
// registryNamespace defines lookaside locations for a single namespace.
type registryNamespace struct {
- SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing.
- SigStoreStaging string `json:"sigstore-staging"` // For writing only.
+ Lookaside string `json:"lookaside"` // For reading, and if LookasideStaging is not present, for writing.
+ LookasideStaging string `json:"lookaside-staging"` // For writing only.
+ SigStore string `json:"sigstore"` // For compatibility, deprecated in favor of Lookaside.
+ SigStoreStaging string `json:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging.
+ UseSigstoreAttachments *bool `json:"use-sigstore-attachments,omitempty"`
}
-// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage.
-// Users outside of this file should use SignatureStorageBaseURL and signatureStorageURL below.
-type signatureStorageBase *url.URL
+// lookasideStorageBase is an "opaque" type representing a lookaside Docker signature storage.
+// Users outside of this file should use SignatureStorageBaseURL and lookasideStorageURL below.
+type lookasideStorageBase *url.URL
-// SignatureStorageBaseURL reads configuration to find an appropriate signature storage URL for ref, for write access if “write”.
+// SignatureStorageBaseURL reads configuration to find an appropriate lookaside storage URL for ref, for write access if “write”.
// the usage of the BaseURL is defined in docs/signature-protocols.md (the “docker/distribution registries—separate storage” section)
// Warning: This function only exposes configuration in registries.d;
// just because this function returns an URL does not mean that the URL will be used by c/image/docker (e.g. if the registry natively supports X-R-S-S).
func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, write bool) (*url.URL, error) {
dr, ok := ref.(dockerReference)
if !ok {
- return nil, errors.Errorf("ref must be a dockerReference")
+ return nil, errors.New("ref must be a dockerReference")
}
- // FIXME? Loading and parsing the config could be cached across calls.
- dirPath := registriesDirPath(sys)
- logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath)
- config, err := loadAndMergeConfig(dirPath)
+ config, err := loadRegistryConfiguration(sys)
if err != nil {
return nil, err
}
- topLevel := config.signatureTopLevel(dr, write)
- var url *url.URL
- if topLevel != "" {
- url, err = url.Parse(topLevel)
- if err != nil {
- return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
- }
- } else {
- // returns default directory if no sigstore specified in configuration file
- url = builtinDefaultSignatureStorageDir(rootless.GetRootlessEUID())
- logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.Redacted())
- }
- // NOTE: Keep this in sync with docs/signature-protocols.md!
- // FIXME? Restrict to explicitly supported schemes?
- repo := reference.Path(dr.ref) // Note that this is without a tag or digest.
- if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
- return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String())
- }
- url.Path = url.Path + "/" + repo
- return url, nil
+ return config.lookasideStorageBaseURL(dr, write)
+}
+
+// loadRegistryConfiguration returns a registryConfiguration appropriate for sys.
+func loadRegistryConfiguration(sys *types.SystemContext) (*registryConfiguration, error) {
+ dirPath := registriesDirPath(sys)
+ logrus.Debugf(`Using registries.d directory %s`, dirPath)
+ return loadAndMergeConfig(dirPath)
}
// registriesDirPath returns a path to registries.d
@@ -115,15 +103,8 @@ func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
return systemRegistriesDirPath
}
-// builtinDefaultSignatureStorageDir returns default signature storage URL as per euid
-func builtinDefaultSignatureStorageDir(euid int) *url.URL {
- if euid != 0 {
- return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)}
- }
- return &url.URL{Scheme: "file", Path: defaultDockerDir}
-}
-
// loadAndMergeConfig loads configuration files in dirPath
+// FIXME: Probably rename to loadRegistryConfigurationForPath
func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}}
dockerDefaultMergedFrom := ""
@@ -153,12 +134,12 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
var config registryConfiguration
err = yaml.Unmarshal(configBytes, &config)
if err != nil {
- return nil, errors.Wrapf(err, "parsing %s", configPath)
+ return nil, fmt.Errorf("parsing %s: %w", configPath, err)
}
if config.DefaultDocker != nil {
if mergedConfig.DefaultDocker != nil {
- return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
+ return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
dockerDefaultMergedFrom, configPath)
}
mergedConfig.DefaultDocker = config.DefaultDocker
@@ -167,7 +148,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
if _, ok := mergedConfig.Docker[nsName]; ok {
- return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
+ return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
nsName, nsMergedFrom[nsName], configPath)
}
mergedConfig.Docker[nsName] = nsConfig
@@ -178,6 +159,40 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
return &mergedConfig, nil
}
+// lookasideStorageBaseURL returns an appropriate signature storage URL for ref, for write access if “write”.
+// the usage of the BaseURL is defined in docs/signature-protocols.md (the “docker/distribution registries—separate storage” section)
+func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference, write bool) (*url.URL, error) {
+ topLevel := config.signatureTopLevel(dr, write)
+ // baseURL avoids shadowing the net/url package, which is still needed for url.Parse below.
+ var baseURL *url.URL
+ if topLevel != "" {
+ u, err := url.Parse(topLevel)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid signature storage URL %s: %w", topLevel, err)
+ }
+ baseURL = u
+ } else {
+ // returns default directory if no lookaside specified in configuration file
+ baseURL = builtinDefaultLookasideStorageDir(rootless.GetRootlessEUID())
+ logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), baseURL.Redacted())
+ }
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ // FIXME? Restrict to explicitly supported schemes?
+ repo := reference.Path(dr.ref) // Note that this is without a tag or digest.
+ if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
+ return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String())
+ }
+ baseURL.Path = baseURL.Path + "/" + repo
+ return baseURL, nil
+}
+
+// builtinDefaultLookasideStorageDir returns the default lookaside storage URL appropriate for euid
+func builtinDefaultLookasideStorageDir(euid int) *url.URL {
+ if euid != 0 {
+ return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)}
+ }
+ return &url.URL{Scheme: "file", Path: defaultDockerDir}
+}
+
// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”.
// (the top level of the storage, namespaced by repo.FullName etc.), or "" if nothing has been configured.
func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string {
@@ -185,7 +200,7 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
// Look for a full match.
identity := ref.PolicyConfigurationIdentity()
if ns, ok := config.Docker[identity]; ok {
- logrus.Debugf(` Using "docker" namespace %s`, identity)
+ logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, identity)
if url := ns.signatureTopLevel(write); url != "" {
return url
}
@@ -194,7 +209,7 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
// Look for a match of the possible parent namespaces.
for _, name := range ref.PolicyConfigurationNamespaces() {
if ns, ok := config.Docker[name]; ok {
- logrus.Debugf(` Using "docker" namespace %s`, name)
+ logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, name)
if url := ns.signatureTopLevel(write); url != "" {
return url
}
@@ -203,7 +218,7 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
}
// Look for a default location
if config.DefaultDocker != nil {
- logrus.Debugf(` Using "default-docker" configuration`)
+ logrus.Debugf(` Lookaside configuration: using "default-docker" configuration`)
if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
return url
}
@@ -211,24 +226,67 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
return ""
}
+// config.useSigstoreAttachments returns whether we should look for and write sigstore attachments
+// for ref.
+func (config *registryConfiguration) useSigstoreAttachments(ref dockerReference) bool {
+ if config.Docker != nil {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if ns, ok := config.Docker[identity]; ok {
+ logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, identity)
+ if ns.UseSigstoreAttachments != nil {
+ return *ns.UseSigstoreAttachments
+ }
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if ns, ok := config.Docker[name]; ok {
+ logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, name)
+ if ns.UseSigstoreAttachments != nil {
+ return *ns.UseSigstoreAttachments
+ }
+ }
+ }
+ }
+ // Look for a default location
+ if config.DefaultDocker != nil {
+ logrus.Debugf(` Sigstore attachments: using "default-docker" configuration`)
+ if config.DefaultDocker.UseSigstoreAttachments != nil {
+ return *config.DefaultDocker.UseSigstoreAttachments
+ }
+ }
+ return false
+}
+
// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”.
// or "" if nothing has been configured.
func (ns registryNamespace) signatureTopLevel(write bool) string {
- if write && ns.SigStoreStaging != "" {
- logrus.Debugf(` Using %s`, ns.SigStoreStaging)
- return ns.SigStoreStaging
+ if write {
+ if ns.LookasideStaging != "" {
+ logrus.Debugf(` Using "lookaside-staging" %s`, ns.LookasideStaging)
+ return ns.LookasideStaging
+ }
+ if ns.SigStoreStaging != "" {
+ logrus.Debugf(` Using "sigstore-staging" %s`, ns.SigStoreStaging)
+ return ns.SigStoreStaging
+ }
+ }
+ if ns.Lookaside != "" {
+ logrus.Debugf(` Using "lookaside" %s`, ns.Lookaside)
+ return ns.Lookaside
}
if ns.SigStore != "" {
- logrus.Debugf(` Using %s`, ns.SigStore)
+ logrus.Debugf(` Using "sigstore" %s`, ns.SigStore)
return ns.SigStore
}
return ""
}
-// signatureStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
+// lookasideStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
// base is not nil from the caller
// NOTE: Keep this in sync with docs/signature-protocols.md!
-func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
+func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) *url.URL {
url := *base
url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
return &url
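
The precedence implemented in ns.signatureTopLevel above is: for writes, "lookaside-staging" wins over the deprecated "sigstore-staging"; for reads (and as the write fallback), "lookaside" wins over "sigstore". A self-contained sketch of that decision with a toy struct (the real registryNamespace type is unexported):

    package main

    import "fmt"

    // namespace mirrors the registries.d keys handled above; names are illustrative.
    type namespace struct {
        Lookaside        string
        LookasideStaging string
        SigStore         string
        SigStoreStaging  string
    }

    // topLevel reproduces the documented precedence: staging keys first for
    // writes, and within each pair the new key shadows the deprecated one.
    func topLevel(ns namespace, write bool) string {
        if write {
            if ns.LookasideStaging != "" {
                return ns.LookasideStaging
            }
            if ns.SigStoreStaging != "" {
                return ns.SigStoreStaging
            }
        }
        if ns.Lookaside != "" {
            return ns.Lookaside
        }
        return ns.SigStore
    }

    func main() {
        ns := namespace{Lookaside: "https://example.com/lookaside", SigStore: "https://example.com/sigstore"}
        fmt.Println(topLevel(ns, false)) // https://example.com/lookaside
    }
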
diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_list.go b/vendor/github.com/containers/image/v5/internal/image/docker_list.go
index af78ac1df..8afc40628 100644
--- a/vendor/github.com/containers/image/v5/internal/image/docker_list.go
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_list.go
@@ -2,32 +2,32 @@ package image
import (
"context"
+ "fmt"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
list, err := manifest.Schema2ListFromManifest(manblob)
if err != nil {
- return nil, errors.Wrapf(err, "parsing schema2 manifest list")
+ return nil, fmt.Errorf("parsing schema2 manifest list: %w", err)
}
targetManifestDigest, err := list.ChooseInstance(sys)
if err != nil {
- return nil, errors.Wrapf(err, "choosing image instance")
+ return nil, fmt.Errorf("choosing image instance: %w", err)
}
manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
if err != nil {
- return nil, errors.Wrapf(err, "fetching target platform image selected from manifest list")
+ return nil, fmt.Errorf("fetching target platform image selected from manifest list: %w", err)
}
matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
if err != nil {
- return nil, errors.Wrap(err, "computing manifest digest")
+ return nil, fmt.Errorf("computing manifest digest: %w", err)
}
if !matches {
- return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+ return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
}
return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
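
The MatchesDigest check above guards against a registry (or cache) handing back a different manifest than the one selected from the list. A minimal sketch of the primitive, assuming any schema2-style JSON blob:

    package main

    import (
        "fmt"

        "github.com/containers/image/v5/manifest"
        "github.com/opencontainers/go-digest"
    )

    func main() {
        blob := []byte(`{"schemaVersion": 2}`)
        expected := digest.FromBytes(blob)
        matches, err := manifest.MatchesDigest(blob, expected)
        if err != nil {
            panic(err)
        }
        fmt.Println(matches) // expected: true
    }
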
diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
index 94f776224..3ef8e144d 100644
--- a/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
@@ -2,13 +2,13 @@ package image
import (
"context"
+ "fmt"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
type manifestSchema1 struct {
@@ -165,22 +165,22 @@ func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *t
if len(m.m.ExtractedV1Compatibility) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing.
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
+ return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
}
if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers))
+ return nil, fmt.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers))
}
if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
+ return nil, fmt.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
}
if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
+ return nil, fmt.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
}
var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
if options.LayerInfos != nil {
if len(options.LayerInfos) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
+ return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
len(options.LayerInfos), len(m.m.FSLayers))
}
convertedLayerUpdates = []types.BlobInfo{}
diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
index 7dfd3c5d8..23a21999a 100644
--- a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
@@ -6,6 +6,7 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
+ "errors"
"fmt"
"strings"
@@ -16,7 +17,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -99,7 +99,7 @@ func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, erro
func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.configBlob == nil {
if m.src == nil {
- return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
+ return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
if err != nil {
@@ -112,7 +112,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.m.ConfigDescriptor.Digest {
- return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
+ return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
}
m.configBlob = blob
}
@@ -277,7 +277,7 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options
haveGzippedEmptyLayer := false
if len(imageConfig.History) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
+ return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
}
for v2Index, historyEntry := range imageConfig.History {
parentV1ID = v1ID
@@ -293,10 +293,10 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options
// and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
if err != nil {
- return nil, errors.Wrap(err, "uploading empty layer")
+ return nil, fmt.Errorf("uploading empty layer: %w", err)
}
if info.Digest != emptyLayerBlobInfo.Digest {
- return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
+ return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
}
haveGzippedEmptyLayer = true
}
@@ -306,7 +306,7 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options
blobDigest = emptyLayerBlobInfo.Digest
} else {
if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
- return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
+ return nil, fmt.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
}
if options.LayerInfos != nil {
convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
@@ -333,7 +333,7 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options
fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
v1CompatibilityBytes, err := json.Marshal(&fakeImage)
if err != nil {
- return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
+ return nil, fmt.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
}
fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
diff --git a/vendor/github.com/containers/image/v5/internal/image/manifest.go b/vendor/github.com/containers/image/v5/internal/image/manifest.go
index 6b5f34538..75e472aa7 100644
--- a/vendor/github.com/containers/image/v5/internal/image/manifest.go
+++ b/vendor/github.com/containers/image/v5/internal/image/manifest.go
@@ -8,7 +8,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// genericManifest is an interface for parsing, modifying image manifests and related data.
@@ -107,7 +106,7 @@ func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.Mani
converter, ok := converters[options.ManifestMIMEType]
if !ok {
- return nil, errors.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType)
+ return nil, fmt.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType)
}
optionsCopy := options
diff --git a/vendor/github.com/containers/image/v5/internal/image/memory.go b/vendor/github.com/containers/image/v5/internal/image/memory.go
index 4c96b37d8..e22c7aafd 100644
--- a/vendor/github.com/containers/image/v5/internal/image/memory.go
+++ b/vendor/github.com/containers/image/v5/internal/image/memory.go
@@ -2,9 +2,9 @@ package image
import (
"context"
+ "errors"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// memoryImage is a mostly-implementation of types.Image assembled from data
diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go
index af1a90e82..4b74de3e5 100644
--- a/vendor/github.com/containers/image/v5/internal/image/oci.go
+++ b/vendor/github.com/containers/image/v5/internal/image/oci.go
@@ -3,6 +3,7 @@ package image
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"github.com/containers/image/v5/docker/reference"
@@ -13,7 +14,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
type manifestOCI1 struct {
@@ -61,7 +61,7 @@ func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.configBlob == nil {
if m.src == nil {
- return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
+ return nil, errors.New("Internal error: neither src nor configBlob set in manifestOCI1")
}
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
if err != nil {
@@ -74,7 +74,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.m.Config.Digest {
- return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
+ return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
}
m.configBlob = blob
}
diff --git a/vendor/github.com/containers/image/v5/internal/image/oci_index.go b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
index d6e6685b1..f4f76622c 100644
--- a/vendor/github.com/containers/image/v5/internal/image/oci_index.go
+++ b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
@@ -2,32 +2,32 @@ package image
import (
"context"
+ "fmt"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
index, err := manifest.OCI1IndexFromManifest(manblob)
if err != nil {
- return nil, errors.Wrapf(err, "parsing OCI1 index")
+ return nil, fmt.Errorf("parsing OCI1 index: %w", err)
}
targetManifestDigest, err := index.ChooseInstance(sys)
if err != nil {
- return nil, errors.Wrapf(err, "choosing image instance")
+ return nil, fmt.Errorf("choosing image instance: %w", err)
}
manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
if err != nil {
- return nil, errors.Wrapf(err, "fetching target platform image selected from image index")
+ return nil, fmt.Errorf("fetching target platform image selected from image index: %w", err)
}
matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
if err != nil {
- return nil, errors.Wrap(err, "computing manifest digest")
+ return nil, fmt.Errorf("computing manifest digest: %w", err)
}
if !matches {
- return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+ return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
}
return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
diff --git a/vendor/github.com/containers/image/v5/internal/image/unparsed.go b/vendor/github.com/containers/image/v5/internal/image/unparsed.go
index 8ea0f61b4..0f026501c 100644
--- a/vendor/github.com/containers/image/v5/internal/image/unparsed.go
+++ b/vendor/github.com/containers/image/v5/internal/image/unparsed.go
@@ -2,12 +2,15 @@ package image
import (
"context"
+ "fmt"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagesource"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// UnparsedImage implements types.UnparsedImage .
@@ -15,13 +18,13 @@ import (
//
// This is publicly visible as c/image/image.UnparsedImage.
type UnparsedImage struct {
- src types.ImageSource
+ src private.ImageSource
instanceDigest *digest.Digest
cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
// A private cache for Manifest(), may be the empty string if guessing failed.
// Valid iff cachedManifest is not nil.
cachedManifestMIMEType string
- cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known.
+ cachedSignatures []signature.Signature // A private cache for Signatures(); nil if not yet known.
}
// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
@@ -32,7 +35,7 @@ type UnparsedImage struct {
// This is publicly visible as c/image/image.UnparsedInstance.
func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
return &UnparsedImage{
- src: src,
+ src: imagesource.FromPublic(src),
instanceDigest: instanceDigest,
}
}
@@ -57,10 +60,10 @@ func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
matches, err := manifest.MatchesDigest(m, digest)
if err != nil {
- return nil, "", errors.Wrap(err, "computing manifest digest")
+ return nil, "", fmt.Errorf("computing manifest digest: %w", err)
}
if !matches {
- return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
+ return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest)
}
}
@@ -88,8 +91,25 @@ func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
+ // It would be consistent to make this an internal/unparsedimage/impl.Compat wrapper,
+ // but this is very likely to be the only implementation ever.
+ sigs, err := i.UntrustedSignatures(ctx)
+ if err != nil {
+ return nil, err
+ }
+ simpleSigs := [][]byte{}
+ for _, sig := range sigs {
+ if sig, ok := sig.(signature.SimpleSigning); ok {
+ simpleSigs = append(simpleSigs, sig.UntrustedSignature())
+ }
+ }
+ return simpleSigs, nil
+}
+
+// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
if i.cachedSignatures == nil {
- sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
+ sigs, err := i.src.GetSignaturesWithFormat(ctx, i.instanceDigest)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
new file mode 100644
index 000000000..ee34ffdbd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
@@ -0,0 +1,77 @@
+package impl
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// Compat implements the obsolete parts of types.ImageDestination
+// for implementations of private.ImageDestination.
+// See AddCompat below.
+type Compat struct {
+ dest private.ImageDestinationInternalOnly
+}
+
+// AddCompat initializes Compat to implement the obsolete parts of types.ImageDestination
+// for implementations of private.ImageDestination.
+//
+// Use it like this:
+// type yourDestination struct {
+// impl.Compat
+// …
+// }
+// dest := &yourDestination{…}
+// dest.Compat = impl.AddCompat(dest)
+//
+func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
+ return Compat{dest}
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ return c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
+ Cache: blobinfocache.FromBlobInfoCache(cache),
+ IsConfig: isConfig,
+ })
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ return c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
+ Cache: blobinfocache.FromBlobInfoCache(cache),
+ CanSubstitute: canSubstitute,
+ })
+}
+
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (c *Compat) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+ withFormat := []signature.Signature{}
+ for _, sig := range signatures {
+ withFormat = append(withFormat, signature.SimpleSigningFromBlob(sig))
+ }
+ return c.dest.PutSignaturesWithFormat(ctx, withFormat, instanceDigest)
+}
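
The AddCompat doc comment above sketches the intended wiring. A self-contained toy version of the same embedding pattern (stand-in interfaces only; the real private.ImageDestination is far larger), showing how one struct ends up satisfying both the obsolete and the format-aware API:

    package main

    import "fmt"

    // newAPI stands in for private.ImageDestination's format-aware method.
    type newAPI interface {
        PutSignaturesWithFormat(sigs []string) error
    }

    // compat stands in for impl.Compat: it adapts the obsolete call to the new one.
    type compat struct{ dest newAPI }

    func addCompat(dest newAPI) compat { return compat{dest} }

    // PutSignatures is the obsolete entry point; it forwards to the new method.
    func (c compat) PutSignatures(blobs [][]byte) error {
        sigs := make([]string, 0, len(blobs))
        for _, b := range blobs {
            sigs = append(sigs, string(b)) // stand-in for signature.SimpleSigningFromBlob
        }
        return c.dest.PutSignaturesWithFormat(sigs)
    }

    // myDestination implements the new API and embeds compat for the old one.
    type myDestination struct {
        compat
    }

    func (d *myDestination) PutSignaturesWithFormat(sigs []string) error {
        fmt.Println("stored", len(sigs), "signatures")
        return nil
    }

    func main() {
        d := &myDestination{}
        d.compat = addCompat(d) // same two-step wiring as dest.Compat = impl.AddCompat(dest)
        if err := d.PutSignatures([][]byte{[]byte("sig")}); err != nil {
            panic(err)
        }
    }
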
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
new file mode 100644
index 000000000..704812e9a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
@@ -0,0 +1,72 @@
+package impl
+
+import "github.com/containers/image/v5/types"
+
+// Properties collects properties of an ImageDestination that are constant throughout its lifetime
+// (but might differ across instances).
+type Properties struct {
+ // SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+ // An empty slice or nil means any MIME type may be tried for upload.
+ SupportedManifestMIMETypes []string
+ // DesiredLayerCompression indicates the kind of compression to apply on layers
+ DesiredLayerCompression types.LayerCompression
+ // AcceptsForeignLayerURLs is false if foreign layers in the manifest should actually be
+ // uploaded to the image destination, true otherwise.
+ AcceptsForeignLayerURLs bool
+ // MustMatchRuntimeOS is set to true if the destination can store only images targeted for the current runtime architecture and OS.
+ MustMatchRuntimeOS bool
+ // IgnoresEmbeddedDockerReference is set to true if the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+ // and would prefer to receive an unmodified manifest instead of one modified for the destination.
+ // Does not make a difference if Reference().DockerReference() is nil.
+ IgnoresEmbeddedDockerReference bool
+ // HasThreadSafePutBlob indicates that PutBlob can be executed concurrently.
+ HasThreadSafePutBlob bool
+}
+
+// PropertyMethodsInitialize implements parts of private.ImageDestination corresponding to Properties.
+type PropertyMethodsInitialize struct {
+ // We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
+
+ vals Properties
+}
+
+// PropertyMethods creates an PropertyMethodsInitialize for vals.
+func PropertyMethods(vals Properties) PropertyMethodsInitialize {
+ return PropertyMethodsInitialize{
+ vals: vals,
+ }
+}
+
+// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+// If an empty slice or nil is returned, any MIME type may be tried for upload.
+func (o PropertyMethodsInitialize) SupportedManifestMIMETypes() []string {
+ return o.vals.SupportedManifestMIMETypes
+}
+
+// DesiredLayerCompression indicates the kind of compression to apply on layers
+func (o PropertyMethodsInitialize) DesiredLayerCompression() types.LayerCompression {
+ return o.vals.DesiredLayerCompression
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
+// uploaded to the image destination, true otherwise.
+func (o PropertyMethodsInitialize) AcceptsForeignLayerURLs() bool {
+ return o.vals.AcceptsForeignLayerURLs
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (o PropertyMethodsInitialize) MustMatchRuntimeOS() bool {
+ return o.vals.MustMatchRuntimeOS
+}
+
+// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (o PropertyMethodsInitialize) IgnoresEmbeddedDockerReference() bool {
+ return o.vals.IgnoresEmbeddedDockerReference
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (o PropertyMethodsInitialize) HasThreadSafePutBlob() bool {
+ return o.vals.HasThreadSafePutBlob
+}
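
As the comment inside PropertyMethodsInitialize notes, the Properties/PropertyMethodsInitialize split exists because Go forbids a field and a method of the same name on one type. A minimal sketch of the collision and the embedding workaround:

    package main

    import "fmt"

    // A field and a method may not share a name on one type:
    //
    //     type broken struct{ HasThreadSafePutBlob bool }
    //     func (b broken) HasThreadSafePutBlob() bool { ... } // does not compile
    //
    // so the values live in one struct and the methods on another, which is embedded.
    type props struct{ hasThreadSafePutBlob bool }

    type propMethods struct{ vals props }

    func propMethodsFor(vals props) propMethods { return propMethods{vals: vals} }

    func (p propMethods) HasThreadSafePutBlob() bool { return p.vals.hasThreadSafePutBlob }

    type myDest struct {
        propMethods
    }

    func main() {
        d := myDest{propMethods: propMethodsFor(props{hasThreadSafePutBlob: true})}
        fmt.Println(d.HasThreadSafePutBlob()) // true, via the promoted method
    }
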
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
new file mode 100644
index 000000000..225ea4491
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
@@ -0,0 +1,52 @@
+package stubs
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+// NoPutBlobPartialInitialize implements parts of private.ImageDestination
+// for transports that don’t support PutBlobPartial().
+// See NoPutBlobPartial() below.
+type NoPutBlobPartialInitialize struct {
+ transportName string
+}
+
+// NoPutBlobPartial creates a NoPutBlobPartialInitialize for ref.
+func NoPutBlobPartial(ref types.ImageReference) NoPutBlobPartialInitialize {
+ return NoPutBlobPartialRaw(ref.Transport().Name())
+}
+
+// NoPutBlobPartialRaw is the same thing as NoPutBlobPartial, but it can be used
+// in situations where no ImageReference is available.
+func NoPutBlobPartialRaw(transportName string) NoPutBlobPartialInitialize {
+ return NoPutBlobPartialInitialize{
+ transportName: transportName,
+ }
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
+ return false
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+ return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
+}
+
+// ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true.
+type ImplementsPutBlobPartial struct{}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (stub ImplementsPutBlobPartial) SupportsPutBlobPartial() bool {
+ return true
+}
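
The PutBlobPartial doc comment above defines a try-then-fall-back contract: even a destination that advertises support may fail, and the caller retries with a full upload. A toy sketch of the caller side, with stand-in functions for PutBlobPartial and PutBlobWithOptions:

    package main

    import (
        "errors"
        "fmt"
    )

    var errPartialNotPossible = errors.New("partial upload not possible for this blob")

    // tryPutBlobPartial stands in for PutBlobPartial: allowed to fail even when supported.
    func tryPutBlobPartial() error { return errPartialNotPossible }

    // putBlobFull stands in for PutBlobWithOptions, the full-upload path.
    func putBlobFull() error { return nil }

    func main() {
        if err := tryPutBlobPartial(); err != nil {
            fmt.Println("partial upload failed, falling back:", err)
            if err := putBlobFull(); err != nil {
                panic(err)
            }
        }
        fmt.Println("blob stored")
    }
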
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go
new file mode 100644
index 000000000..7015fd068
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go
@@ -0,0 +1,50 @@
+package stubs
+
+import (
+ "context"
+ "errors"
+
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/opencontainers/go-digest"
+)
+
+// NoSignaturesInitialize implements parts of private.ImageDestination
+// for transports that don’t support storing signatures.
+// See NoSignatures() below.
+type NoSignaturesInitialize struct {
+ message string
+}
+
+// NoSignatures creates a NoSignaturesInitialize, failing with message.
+func NoSignatures(message string) NoSignaturesInitialize {
+ return NoSignaturesInitialize{
+ message: message,
+ }
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (stub NoSignaturesInitialize) SupportsSignatures(ctx context.Context) error {
+ return errors.New(stub.message)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (stub NoSignaturesInitialize) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ if len(signatures) != 0 {
+ return errors.New(stub.message)
+ }
+ return nil
+}
+
+// AlwaysSupportsSignatures implements SupportsSignatures() that returns nil.
+// Note that it might be even more useful to return a value dynamically detected based on the destination's actual capabilities.
+type AlwaysSupportsSignatures struct{}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (stub AlwaysSupportsSignatures) SupportsSignatures(ctx context.Context) error {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go
new file mode 100644
index 000000000..e81eec896
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go
@@ -0,0 +1,25 @@
+// Package stubs contains trivial stubs for parts of private.ImageDestination.
+// It can be used from internal/wrapper, so it should not drag in any extra dependencies.
+// Compare with imagedestination/impl, which might require non-trivial implementation work.
+//
+// There are two kinds of stubs:
+// - Pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
+// implementation:
+//
+// type yourDestination struct {
+// stubs.ImplementsPutBlobPartial
+// …
+// }
+// - Stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
+// means that a constructor must be called:
+// type yourDestination struct {
+// stubs.NoPutBlobPartialInitialize
+// …
+// }
+//
+// dest := &yourDestination{
+// …
+// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+// }
+//
+package stubs
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
index 82734a6cd..43575ede3 100644
--- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
@@ -2,13 +2,23 @@ package imagedestination
import (
"context"
- "fmt"
"io"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
)
+// wrapped provides the private.ImageDestination operations
+// for a destination that only implements types.ImageDestination
+type wrapped struct {
+ stubs.NoPutBlobPartialInitialize
+
+ types.ImageDestination
+}
+
// FromPublic(dest) returns an object that provides the private.ImageDestination API
//
// Eventually, we might want to expose this function, and methods of the returned object,
@@ -23,18 +33,11 @@ func FromPublic(dest types.ImageDestination) private.ImageDestination {
if dest2, ok := dest.(private.ImageDestination); ok {
return dest2
}
- return &wrapped{ImageDestination: dest}
-}
-
-// wrapped provides the private.ImageDestination operations
-// for a destination that only implements types.ImageDestination
-type wrapped struct {
- types.ImageDestination
-}
+ return &wrapped{
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(dest.Reference()),
-// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
-func (w *wrapped) SupportsPutBlobPartial() bool {
- return false
+ ImageDestination: dest,
+ }
}
// PutBlobWithOptions writes contents of stream and returns data representing the result.
@@ -48,15 +51,6 @@ func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inpu
return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
}
-// PutBlobPartial attempts to create a blob using the data that is already present
-// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
-// It is available only if SupportsPutBlobPartial().
-// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-// should fall back to PutBlobWithOptions.
-func (w *wrapped) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
- return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", w.Reference().Transport().Name())
-}
-
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
@@ -67,3 +61,19 @@ func (w *wrapped) PutBlobPartial(ctx context.Context, chunkAccessor private.Blob
func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (w *wrapped) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ simpleSigs := [][]byte{}
+ for _, sig := range signatures {
+ simpleSig, ok := sig.(signature.SimpleSigning)
+ if !ok {
+ return signature.UnsupportedFormatError(sig)
+ }
+ simpleSigs = append(simpleSigs, simpleSig.UntrustedSignature())
+ }
+ return w.PutSignatures(ctx, simpleSigs, instanceDigest)
+}
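
FromPublic above (and its imagesource twin below) follows an upgrade-or-wrap shape: if the public object already implements the private interface, return it unchanged; otherwise wrap it, with stubs supplying the unsupported operations. A toy sketch of that shape with stand-in interfaces:

    package main

    import "fmt"

    type publicAPI interface{ Name() string }

    // privateAPI extends publicAPI with one additional operation.
    type privateAPI interface {
        publicAPI
        SupportsGetBlobAt() bool
    }

    // wrapper adds a stub implementation of the extra operation.
    type wrapper struct {
        publicAPI
    }

    func (w *wrapper) SupportsGetBlobAt() bool { return false }

    // fromPublic returns p unchanged if it already implements privateAPI,
    // and wraps it with the stub behavior otherwise.
    func fromPublic(p publicAPI) privateAPI {
        if p2, ok := p.(privateAPI); ok {
            return p2
        }
        return &wrapper{publicAPI: p}
    }

    type plainSource struct{}

    func (plainSource) Name() string { return "plain" }

    func main() {
        s := fromPublic(plainSource{})
        fmt.Println(s.Name(), s.SupportsGetBlobAt()) // plain false
    }
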
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go
new file mode 100644
index 000000000..6f7932916
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go
@@ -0,0 +1,54 @@
+package impl
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/opencontainers/go-digest"
+)
+
+// Compat implements the obsolete parts of types.ImageSource
+// for implementations of private.ImageSource.
+// See AddCompat below.
+type Compat struct {
+ src private.ImageSourceInternalOnly
+}
+
+// AddCompat initializes Compat to implement the obsolete parts of types.ImageSource
+// for implementations of private.ImageSource.
+//
+// Use it like this:
+// type yourSource struct {
+// impl.Compat
+// …
+// }
+// src := &yourSource{…}
+// src.Compat = impl.AddCompat(src)
+//
+func AddCompat(src private.ImageSourceInternalOnly) Compat {
+ return Compat{src}
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (c *Compat) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ // Silently ignore signatures with other formats; the caller can’t handle them.
+ // Admittedly callers that want to sync all of the image might want to fail instead; this
+ // way an upgrade of c/image neither breaks them nor adds new functionality.
+ // Alternatively, we could possibly define the old GetSignatures to use the multi-format
+ // signature.Blob representation now, in general, but that could silently break them as well.
+ sigs, err := c.src.GetSignaturesWithFormat(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ simpleSigs := [][]byte{}
+ for _, sig := range sigs {
+ if sig, ok := sig.(signature.SimpleSigning); ok {
+ simpleSigs = append(simpleSigs, sig.UntrustedSignature())
+ }
+ }
+ return simpleSigs, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
new file mode 100644
index 000000000..d5eae6351
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
@@ -0,0 +1,23 @@
+package impl
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// DoesNotAffectLayerInfosForCopy implements LayerInfosForCopy() that returns nothing.
+type DoesNotAffectLayerInfosForCopy struct{}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (stub DoesNotAffectLayerInfosForCopy) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
new file mode 100644
index 000000000..73e8c78e9
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
@@ -0,0 +1,27 @@
+package impl
+
+// Properties collects properties of an ImageSource that are constant throughout its lifetime
+// (but might differ across instances).
+type Properties struct {
+ // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+ HasThreadSafeGetBlob bool
+}
+
+// PropertyMethodsInitialize implements parts of private.ImageSource corresponding to Properties.
+type PropertyMethodsInitialize struct {
+ // We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
+
+ vals Properties
+}
+
+// PropertyMethods creates an PropertyMethodsInitialize for vals.
+func PropertyMethods(vals Properties) PropertyMethodsInitialize {
+ return PropertyMethodsInitialize{
+ vals: vals,
+ }
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (o PropertyMethodsInitialize) HasThreadSafeGetBlob() bool {
+ return o.vals.HasThreadSafeGetBlob
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
new file mode 100644
index 000000000..b3a8c7e88
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
@@ -0,0 +1,19 @@
+package impl
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/opencontainers/go-digest"
+)
+
+// NoSignatures implements GetSignaturesWithFormat() that returns nothing.
+type NoSignatures struct{}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (stub NoSignatures) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
new file mode 100644
index 000000000..15aee6d42
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
@@ -0,0 +1,52 @@
+package stubs
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+// NoGetBlobAtInitialize implements parts of private.ImageSource
+// for transports that don’t support GetBlobAt().
+// See NoGetBlobAt() below.
+type NoGetBlobAtInitialize struct {
+ transportName string
+}
+
+// NoGetBlobAt() creates a NoGetBlobAtInitialize for ref.
+func NoGetBlobAt(ref types.ImageReference) NoGetBlobAtInitialize {
+ return NoGetBlobAtRaw(ref.Transport().Name())
+}
+
+// NoGetBlobAtRaw is the same thing as NoGetBlobAt, but it can be used
+// in situations where no ImageReference is available.
+func NoGetBlobAtRaw(transportName string) NoGetBlobAtInitialize {
+ return NoGetBlobAtInitialize{
+ transportName: transportName,
+ }
+}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool {
+ return false
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must be not overlapping and sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName)
+}
+
+// ImplementsGetBlobAt implements SupportsGetBlobAt() that returns true.
+type ImplementsGetBlobAt struct{}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (stub ImplementsGetBlobAt) SupportsGetBlobAt() bool {
+ return true
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
new file mode 100644
index 000000000..134fd1b53
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
@@ -0,0 +1,25 @@
+// Package stubs contains trivial stubs for parts of private.ImageSource.
+// It can be used from internal/wrapper, so it should not drag in any extra dependencies.
+// Compare with imagesource/impl, which might require non-trivial implementation work.
+//
+// There are two kinds of stubs:
+// - Pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
+// implementation:
+//
+// type yourSource struct {
+// stubs.ImplementsGetBlobAt
+// …
+// }
+// - Stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
+// means that a constructor must be called:
+// type yourSource struct {
+// stubs.NoGetBlobAtInitialize
+// …
+// }
+//
+// dest := &yourSource{
+// …
+// NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+// }
+//
+package stubs
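
A minimal sketch of the embedding pattern described above, combining one pure stub with one constructor-backed stub. It assumes the code lives inside the containers/image module (the internal/… packages are not importable from outside it), and toySource/newToySource are hypothetical names:

    package example // hypothetical; internal/… packages resolve only inside the containers/image module

    import (
    	"github.com/containers/image/v5/internal/imagesource/impl"
    	"github.com/containers/image/v5/internal/imagesource/stubs"
    	"github.com/containers/image/v5/types"
    )

    // toySource embeds a pure stub directly, and a constructor-backed stub
    // that newToySource must fill in.
    type toySource struct {
    	impl.NoSignatures           // GetSignaturesWithFormat returns no signatures
    	stubs.NoGetBlobAtInitialize // SupportsGetBlobAt returns false; GetBlobAt errors

    	// … the transport’s state and its remaining types.ImageSource methods …
    }

    func newToySource(ref types.ImageReference) *toySource {
    	return &toySource{
    		NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
    	}
    }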
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
index fe1be8d9e..886b4e833 100644
--- a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
@@ -2,13 +2,22 @@ package imagesource
import (
"context"
- "fmt"
- "io"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
)
+// wrapped provides the private.ImageSource operations
+// for a source that only implements types.ImageSource
+type wrapped struct {
+ stubs.NoGetBlobAtInitialize
+
+ types.ImageSource
+}
+
// FromPublic(src) returns an object that provides the private.ImageSource API
//
// Eventually, we might want to expose this function, and methods of the returned object,
@@ -23,25 +32,25 @@ func FromPublic(src types.ImageSource) private.ImageSource {
if src2, ok := src.(private.ImageSource); ok {
return src2
}
- return &wrapped{ImageSource: src}
-}
+ return &wrapped{
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(src.Reference()),
-// wrapped provides the private.ImageSource operations
-// for a source that only implements types.ImageSource
-type wrapped struct {
- types.ImageSource
-}
-
-// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
-func (w *wrapped) SupportsGetBlobAt() bool {
- return false
+ ImageSource: src,
+ }
}
-// GetBlobAt returns a sequential channel of readers that contain data for the requested
-// blob chunks, and a channel that might get a single error value.
-// The specified chunks must be not overlapping and sorted by their offset.
-// The readers must be fully consumed, in the order they are returned, before blocking
-// to read the next chunk.
-func (w *wrapped) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
- return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", w.Reference().Transport().Name())
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (w *wrapped) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ sigs, err := w.GetSignatures(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ res := []signature.Signature{}
+ for _, sig := range sigs {
+ res = append(res, signature.SimpleSigningFromBlob(sig))
+ }
+ return res, nil
}
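
As a usage sketch: FromPublic is a no-op for sources that already implement private.ImageSource, so callers can invoke it unconditionally and then rely on the format-aware signature API even for legacy transports. signaturesOf is a hypothetical helper, assumed to compile inside this module:

    package example // hypothetical location inside the containers/image module

    import (
    	"context"

    	"github.com/containers/image/v5/internal/imagesource"
    	"github.com/containers/image/v5/internal/signature"
    	"github.com/containers/image/v5/types"
    )

    func signaturesOf(ctx context.Context, ref types.ImageReference, sys *types.SystemContext) ([]signature.Signature, error) {
    	src, err := ref.NewImageSource(ctx, sys)
    	if err != nil {
    		return nil, err
    	}
    	defer src.Close()
    	// Legacy sources surface their [][]byte signatures as SimpleSigning values.
    	return imagesource.FromPublic(src).GetSignaturesWithFormat(ctx, nil)
    }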
diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
index 49fa410e9..f17d00246 100644
--- a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
+++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
@@ -1,9 +1,8 @@
package iolimits
import (
+ "fmt"
"io"
-
- "github.com/pkg/errors"
)
// All constants below are intended to be used as limits for `ReadAtMost`. The
@@ -52,7 +51,7 @@ func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
}
if len(res) > limit {
- return nil, errors.Errorf("exceeded maximum allowed size of %d bytes", limit)
+ return nil, fmt.Errorf("exceeded maximum allowed size of %d bytes", limit)
}
return res, nil
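
Call sites are unaffected by the migration; a hedged usage sketch, assuming MaxManifestBodySize is one of the limit constants this file defines:

    package example // hypothetical location inside the containers/image module

    import (
    	"io"

    	"github.com/containers/image/v5/internal/iolimits"
    )

    // readManifest reads at most iolimits.MaxManifestBodySize bytes; past the
    // limit, ReadAtMost now fails with a plain fmt.Errorf-produced error.
    func readManifest(r io.Reader) ([]byte, error) {
    	return iolimits.ReadAtMost(r, iolimits.MaxManifestBodySize)
    }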
diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go
index 65788651f..bfd6148ce 100644
--- a/vendor/github.com/containers/image/v5/internal/private/private.go
+++ b/vendor/github.com/containers/image/v5/internal/private/private.go
@@ -5,26 +5,40 @@ import (
"io"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
)
-// ImageSource is an internal extension to the types.ImageSource interface.
-type ImageSource interface {
- types.ImageSource
-
+// ImageSourceInternalOnly is the part of private.ImageSource that is not
+// a part of types.ImageSource.
+type ImageSourceInternalOnly interface {
// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
SupportsGetBlobAt() bool
// BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt().
BlobChunkAccessor
+
+ // GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+ // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+ // (e.g. if the source never returns manifest lists).
+ GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error)
}
-// ImageDestination is an internal extension to the types.ImageDestination
-// interface.
-type ImageDestination interface {
- types.ImageDestination
+// ImageSource is an internal extension to the types.ImageSource interface.
+type ImageSource interface {
+ types.ImageSource
+ ImageSourceInternalOnly
+}
+// ImageDestinationInternalOnly is the part of private.ImageDestination that is not
+// a part of types.ImageDestination.
+type ImageDestinationInternalOnly interface {
// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
SupportsPutBlobPartial() bool
+ // FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures
+ // on unsupported formats.
// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
@@ -40,7 +54,7 @@ type ImageDestination interface {
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
- PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error)
+ PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error)
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
@@ -50,15 +64,31 @@ type ImageDestination interface {
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error)
+
+ // PutSignaturesWithFormat writes a set of signatures to the destination.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+ // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+ // MUST be called after PutManifest (signatures may reference manifest contents).
+ PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error
+}
+
+// ImageDestination is an internal extension to the types.ImageDestination
+// interface.
+type ImageDestination interface {
+ types.ImageDestination
+ ImageDestinationInternalOnly
}
// PutBlobOptions are used in PutBlobWithOptions.
type PutBlobOptions struct {
- Cache types.BlobInfoCache // Cache to optionally update with the uploaded blob, or to look up blob infos.
- IsConfig bool // True if the blob is a config
+ Cache blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob, or to look up blob infos.
+ IsConfig bool // True if the blob is a config
// The following fields are new to internal/private. Users of internal/private MUST fill them in,
// but they also must expect that they will be ignored by types.ImageDestination transports.
+ // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
+ // if they use internal/imagedestination/impl.Compat;
+ // in that case, they will all be consistently zero-valued.
EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
@@ -66,13 +96,16 @@ type PutBlobOptions struct {
// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
type TryReusingBlobOptions struct {
- Cache types.BlobInfoCache // Cache to use and/or update.
+ Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update.
// If true, it is allowed to use an equivalent of the desired blob;
// in that case the returned info may not match the input.
CanSubstitute bool
// The following fields are new to internal/private. Users of internal/private MUST fill them in,
// but they also must expect that they will be ignored by types.ImageDestination transports.
+ // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
+ // if they use internal/imagedestination/impl.Compat;
+ // in that case, they will all be consistently zero-valued.
EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
@@ -104,3 +137,10 @@ type BadPartialRequestError struct {
func (e BadPartialRequestError) Error() string {
return e.Status
}
+
+// UnparsedImage is an internal extension to the types.UnparsedImage interface.
+type UnparsedImage interface {
+ types.UnparsedImage
+ // UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+ UntrustedSignatures(ctx context.Context) ([]signature.Signature, error)
+}
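
Splitting the interfaces into …InternalOnly halves is what lets adapters assemble an implementation from parts. A hedged sketch (composedSource is hypothetical and mirrors the wrapped type in internal/imagesource):

    package example // hypothetical location inside the containers/image module

    import (
    	"github.com/containers/image/v5/internal/imagesource/impl"
    	"github.com/containers/image/v5/internal/imagesource/stubs"
    	"github.com/containers/image/v5/internal/private"
    	"github.com/containers/image/v5/types"
    )

    // composedSource satisfies private.ImageSource by delegating the public half
    // to an embedded types.ImageSource and covering the internal-only half with stubs.
    type composedSource struct {
    	types.ImageSource
    	impl.NoSignatures
    	stubs.NoGetBlobAtInitialize
    }

    // Compile-time check that the composition is complete.
    var _ private.ImageSource = composedSource{}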
diff --git a/vendor/github.com/containers/image/v5/internal/signature/signature.go b/vendor/github.com/containers/image/v5/internal/signature/signature.go
new file mode 100644
index 000000000..ee90b788b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/signature.go
@@ -0,0 +1,111 @@
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// FIXME FIXME: MIME type? Int? String?
+// An interface with a name, parse methods?
+type FormatID string
+
+const (
+ SimpleSigningFormat FormatID = "simple-signing"
+ SigstoreFormat FormatID = "sigstore-json"
+ // Update also UnsupportedFormatError below
+)
+
+// Signature is an image signature of some kind.
+type Signature interface {
+ FormatID() FormatID
+ // blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+ // Almost everyone should use signature.Blob() instead.
+ blobChunk() ([]byte, error)
+}
+
+// Blob returns a representation of sig as a []byte, suitable for long-term storage.
+func Blob(sig Signature) ([]byte, error) {
+ chunk, err := sig.blobChunk()
+ if err != nil {
+ return nil, err
+ }
+
+ format := sig.FormatID()
+ switch format {
+ case SimpleSigningFormat:
+ // For compatibility with old dir formats:
+ return chunk, nil
+ default:
+ res := []byte{0} // Start with a zero byte to clearly mark this is a binary format, and disambiguate from random text.
+ res = append(res, []byte(format)...)
+ res = append(res, '\n')
+ res = append(res, chunk...)
+ return res, nil
+ }
+}
+
+// FromBlob returns a signature from parsing a blob created by signature.Blob.
+func FromBlob(blob []byte) (Signature, error) {
+ if len(blob) == 0 {
+ return nil, errors.New("empty signature blob")
+ }
+ // Historically we’ve just been using GPG with no identification; try to auto-detect that.
+ switch blob[0] {
+ // OpenPGP "compressed data" wrapping the message
+ case 0xA0, 0xA1, 0xA2, 0xA3, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 8 (tag: compressed data packet); bits 1…0 = length-type (any)
+ 0xC8, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 8 (tag: compressed data packet)
+ // OpenPGP “one-pass signature” starting a signature
+ 0x90, 0x91, 0x92, 0x93, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 4 (tag: one-pass signature packet); bits 1…0 = length-type (any)
+ 0xC4, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 4 (tag: one-pass signature packet)
+ // OpenPGP signature packet signing the following data
+ 0x88, 0x89, 0x8A, 0x8B, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 2 (tag: signature packet); bits 1…0 = length-type (any)
+ 0xC2: // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 2 (tag: signature packet)
+ return SimpleSigningFromBlob(blob), nil
+
+ // The newer format: binary 0, format name, newline, data
+ case 0x00:
+ blob = blob[1:]
+ newline := bytes.IndexByte(blob, '\n')
+ if newline == -1 {
+ return nil, fmt.Errorf("invalid signature format, missing newline")
+ }
+ formatBytes := blob[:newline]
+ for _, b := range formatBytes {
+ if b < 32 || b >= 0x7F {
+ return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b)
+ }
+ }
+ blobChunk := blob[newline+1:]
+ switch {
+ case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)):
+ return SimpleSigningFromBlob(blobChunk), nil
+ case bytes.Equal(formatBytes, []byte(SigstoreFormat)):
+ return SigstoreFromBlobChunk(blobChunk)
+ default:
+ return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes))
+ }
+
+ default:
+ return nil, fmt.Errorf("unrecognized signature format, starting with binary %#x", blob[0])
+ }
+
+}
+
+// UnsupportedFormatError returns an error complaining about sig having an unsupported format.
+func UnsupportedFormatError(sig Signature) error {
+ formatID := sig.FormatID()
+ switch formatID {
+ case SimpleSigningFormat, SigstoreFormat:
+ return fmt.Errorf("unsupported signature format %s", string(formatID))
+ default:
+ return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID))
+ }
+}
+
+// copyByteSlice returns a guaranteed-fresh copy of a byte slice
+// Use this to make sure the underlying data is not shared and can’t be unexpectedly modified.
+func copyByteSlice(s []byte) []byte {
+ res := []byte{}
+ return append(res, s...)
+}
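
A hedged round-trip sketch of the framing implemented above (roundTrip is a hypothetical helper): simple-signing blobs are written verbatim for compatibility with old dir: layouts and re-detected by their OpenPGP leading byte, while every other format gets the 0x00 + format-name + newline frame:

    package example // hypothetical location inside the containers/image module

    import "github.com/containers/image/v5/internal/signature"

    func roundTrip(gpgPacket []byte) (signature.Signature, error) {
    	// Blob stores simple-signing payloads unframed …
    	blob, err := signature.Blob(signature.SimpleSigningFromBlob(gpgPacket))
    	if err != nil {
    		return nil, err
    	}
    	// … so FromBlob must recognize them by their first byte (e.g. 0xC2,
    	// a new-format OpenPGP signature packet); other leading bytes fail.
    	return signature.FromBlob(blob)
    }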
diff --git a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go
new file mode 100644
index 000000000..17342c8b7
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go
@@ -0,0 +1,84 @@
+package signature
+
+import "encoding/json"
+
+const (
+ // from sigstore/cosign/pkg/types.SimpleSigningMediaType
+ SigstoreSignatureMIMEType = "application/vnd.dev.cosign.simplesigning.v1+json"
+ // from sigstore/cosign/pkg/oci/static.SignatureAnnotationKey
+ SigstoreSignatureAnnotationKey = "dev.cosignproject.cosign/signature"
+)
+
+// Sigstore is a github.com/sigstore/cosign signature.
+// For the persistent-storage format used for blobChunk(), we want
+// a degree of forward compatibility against unexpected field changes
+// (as has happened before), which is why this data type
+// contains just a payload + annotations (including annotations
+// that we don’t recognize or support), instead of individual fields
+// for the known annotations.
+type Sigstore struct {
+ untrustedMIMEType string
+ untrustedPayload []byte
+ untrustedAnnotations map[string]string
+}
+
+// sigstoreJSONRepresentation needs the fields to be public, which we don’t want for
+// the main Sigstore type.
+type sigstoreJSONRepresentation struct {
+ UntrustedMIMEType string `json:"mimeType"`
+ UntrustedPayload []byte `json:"payload"`
+ UntrustedAnnotations map[string]string `json:"annotations"`
+}
+
+// SigstoreFromComponents returns a Sigstore object from its components.
+func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
+ return Sigstore{
+ untrustedMIMEType: untrustedMimeType,
+ untrustedPayload: copyByteSlice(untrustedPayload),
+ untrustedAnnotations: copyStringMap(untrustedAnnotations),
+ }
+}
+
+// SigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
+func SigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
+ var v sigstoreJSONRepresentation
+ if err := json.Unmarshal(blobChunk, &v); err != nil {
+ return Sigstore{}, err
+ }
+ return SigstoreFromComponents(v.UntrustedMIMEType,
+ v.UntrustedPayload,
+ v.UntrustedAnnotations), nil
+}
+
+func (s Sigstore) FormatID() FormatID {
+ return SigstoreFormat
+}
+
+// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+// Almost everyone should use signature.Blob() instead.
+func (s Sigstore) blobChunk() ([]byte, error) {
+ return json.Marshal(sigstoreJSONRepresentation{
+ UntrustedMIMEType: s.UntrustedMIMEType(),
+ UntrustedPayload: s.UntrustedPayload(),
+ UntrustedAnnotations: s.UntrustedAnnotations(),
+ })
+}
+
+func (s Sigstore) UntrustedMIMEType() string {
+ return s.untrustedMIMEType
+}
+func (s Sigstore) UntrustedPayload() []byte {
+ return copyByteSlice(s.untrustedPayload)
+}
+
+func (s Sigstore) UntrustedAnnotations() map[string]string {
+ return copyStringMap(s.untrustedAnnotations)
+}
+
+func copyStringMap(m map[string]string) map[string]string {
+ res := map[string]string{}
+ for k, v := range m {
+ res[k] = v
+ }
+ return res
+}
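
A hedged construction sketch (sigstoreBlob is a hypothetical helper): the payload and the base64 signature annotation travel as opaque, untrusted data, and Blob applies the binary-format frame to the JSON chunk:

    package example // hypothetical location inside the containers/image module

    import "github.com/containers/image/v5/internal/signature"

    func sigstoreBlob(payload []byte, base64Sig string) ([]byte, error) {
    	sig := signature.SigstoreFromComponents(
    		signature.SigstoreSignatureMIMEType,
    		payload,
    		map[string]string{signature.SigstoreSignatureAnnotationKey: base64Sig},
    	)
    	// Non-simple-signing formats are framed as 0x00 + "sigstore-json" + '\n' + JSON.
    	return signature.Blob(sig)
    }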
diff --git a/vendor/github.com/containers/image/v5/internal/signature/simple.go b/vendor/github.com/containers/image/v5/internal/signature/simple.go
new file mode 100644
index 000000000..88b8adad0
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/simple.go
@@ -0,0 +1,27 @@
+package signature
+
+// SimpleSigning is a “simple signing” signature.
+type SimpleSigning struct {
+ untrustedSignature []byte
+}
+
+// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
+func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
+ return SimpleSigning{
+ untrustedSignature: copyByteSlice(blobChunk),
+ }
+}
+
+func (s SimpleSigning) FormatID() FormatID {
+ return SimpleSigningFormat
+}
+
+// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+// Almost everyone should use signature.Blob() instead.
+func (s SimpleSigning) blobChunk() ([]byte, error) {
+ return copyByteSlice(s.untrustedSignature), nil
+}
+
+func (s SimpleSigning) UntrustedSignature() []byte {
+ return copyByteSlice(s.untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go
new file mode 100644
index 000000000..fe65b1a98
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go
@@ -0,0 +1,38 @@
+package unparsedimage
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+)
+
+// wrapped provides the private.UnparsedImage operations
+// for an object that only implements types.UnparsedImage
+type wrapped struct {
+ types.UnparsedImage
+}
+
+// FromPublic(unparsed) returns an object that provides the private.UnparsedImage API
+func FromPublic(unparsed types.UnparsedImage) private.UnparsedImage {
+ if unparsed2, ok := unparsed.(private.UnparsedImage); ok {
+ return unparsed2
+ }
+ return &wrapped{
+ UnparsedImage: unparsed,
+ }
+}
+
+// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+func (w *wrapped) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
+ sigs, err := w.Signatures(ctx)
+ if err != nil {
+ return nil, err
+ }
+ res := []signature.Signature{}
+ for _, sig := range sigs {
+ res = append(res, signature.SimpleSigningFromBlob(sig))
+ }
+ return res, nil
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
index 6d12c4cec..e1f1fb9d9 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
@@ -2,6 +2,8 @@ package manifest
import (
"encoding/json"
+ "errors"
+ "fmt"
"regexp"
"strings"
"time"
@@ -10,7 +12,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/docker/docker/api/types/versions"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
@@ -58,7 +59,7 @@ func Schema1FromManifest(manifest []byte) (*Schema1, error) {
return nil, err
}
if s1.SchemaVersion != 1 {
- return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
+ return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion)
}
if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema1SignedMediaType,
allowedFieldFSLayers|allowedFieldHistory); err != nil {
@@ -113,7 +114,7 @@ func (m *Schema1) initialize() error {
m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
for i, h := range m.History {
if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
- return errors.Wrapf(err, "parsing v2s1 history entry %d", i)
+ return fmt.Errorf("parsing v2s1 history entry %d: %w", i, err)
}
}
return nil
@@ -142,7 +143,7 @@ func (m *Schema1) LayerInfos() []LayerInfo {
func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
// Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
if len(m.FSLayers) != len(layerInfos) {
- return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
}
m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
for i, info := range layerInfos {
@@ -187,7 +188,7 @@ func (m *Schema1) fixManifestLayers() error {
for _, img := range m.ExtractedV1Compatibility {
// skip IDs that appear after each other, we handle those later
if _, exists := idmap[img.ID]; img.ID != lastID && exists {
- return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
+ return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
}
lastID = img.ID
idmap[lastID] = struct{}{}
@@ -199,7 +200,7 @@ func (m *Schema1) fixManifestLayers() error {
m.History = append(m.History[:i], m.History[i+1:]...)
m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...)
} else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
- return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
+ return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
}
}
return nil
@@ -209,7 +210,7 @@ var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
func validateV1ID(id string) error {
if ok := validHex.MatchString(id); !ok {
- return errors.Errorf("image ID %q is invalid", id)
+ return fmt.Errorf("image ID %q is invalid", id)
}
return nil
}
@@ -246,14 +247,14 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
config := []byte(m.History[0].V1Compatibility)
err := json.Unmarshal(config, &s1)
if err != nil {
- return nil, errors.Wrapf(err, "decoding configuration")
+ return nil, fmt.Errorf("decoding configuration: %w", err)
}
// Images created with versions prior to 1.8.3 require us to re-encode the encoded object,
// adding some fields that aren't "omitempty".
if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") {
config, err = json.Marshal(&s1)
if err != nil {
- return nil, errors.Wrapf(err, "re-encoding compat image config %#v", s1)
+ return nil, fmt.Errorf("re-encoding compat image config %#v: %w", s1, err)
}
}
// Build the history.
@@ -280,7 +281,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
raw := make(map[string]*json.RawMessage)
err = json.Unmarshal(config, &raw)
if err != nil {
- return nil, errors.Wrapf(err, "re-decoding compat image config %#v", s1)
+ return nil, fmt.Errorf("re-decoding compat image config %#v: %w", s1, err)
}
// Drop some fields.
delete(raw, "id")
@@ -292,20 +293,20 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
// Add the history and rootfs information.
rootfs, err := json.Marshal(rootFS)
if err != nil {
- return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err)
+ return nil, fmt.Errorf("error encoding rootfs information %#v: %v", rootFS, err)
}
rawRootfs := json.RawMessage(rootfs)
raw["rootfs"] = &rawRootfs
history, err := json.Marshal(convertedHistory)
if err != nil {
- return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err)
+ return nil, fmt.Errorf("error encoding history information %#v: %v", convertedHistory, err)
}
rawHistory := json.RawMessage(history)
raw["history"] = &rawHistory
// Encode the result.
config, err = json.Marshal(raw)
if err != nil {
- return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err)
+ return nil, fmt.Errorf("error re-encoding compat image config %#v: %v", s1, err)
}
return config, nil
}
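
The mechanical errors.Wrapf → fmt.Errorf("…: %w", err) migration in this file (and the manifest files below) keeps the cause chain intact for errors.Is/errors.As; a self-contained sketch:

    package main

    import (
    	"encoding/json"
    	"errors"
    	"fmt"
    )

    func main() {
    	inner := &json.SyntaxError{Offset: 7}
    	// fmt.Errorf with %w is the stdlib replacement for errors.Wrapf:
    	wrapped := fmt.Errorf("parsing v2s1 history entry %d: %w", 0, inner)

    	var syntaxErr *json.SyntaxError
    	fmt.Println(errors.As(wrapped, &syntaxErr))         // true
    	fmt.Println(errors.Unwrap(wrapped) == error(inner)) // true
    }

Note that a few call sites above use %v instead of %w (e.g. the rootfs and history re-encoding errors), which formats the cause into the message without keeping it unwrappable.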
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
index 8b3fbdd39..e79d0851f 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
@@ -9,7 +9,6 @@ import (
"github.com/containers/image/v5/pkg/strslice"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
@@ -234,7 +233,7 @@ var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
// CompressionAlgorithm that would result in anything other than gzip compression.
func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.LayersDescriptors) != len(layerInfos) {
- return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
}
original := m.LayersDescriptors
m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
@@ -246,7 +245,7 @@ func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
}
mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info)
if err != nil {
- return errors.Wrapf(err, "preparing updated manifest, layer %q", info.Digest)
+ return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
}
m.LayersDescriptors[i].MediaType = mimeType
m.LayersDescriptors[i].Digest = info.Digest
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
index e97dfbd88..7e4cc5183 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
@@ -8,7 +8,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// Schema2PlatformSpec describes the platform which a particular manifest is
@@ -60,26 +59,26 @@ func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, err
}, nil
}
}
- return ListUpdate{}, errors.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
+ return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(list.Manifests) {
- return errors.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
+ return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
- return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest", i+1, len(updates))
+ return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
list.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
- return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
+ return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
list.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
- return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
+ return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
}
list.Manifests[i].MediaType = updates[i].MediaType
}
@@ -91,7 +90,7 @@ func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
- return "", errors.Wrapf(err, "getting platform information %#v", ctx)
+ return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range list.Manifests {
@@ -115,7 +114,7 @@ func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest
func (list *Schema2List) Serialize() ([]byte, error) {
buf, err := json.Marshal(list)
if err != nil {
- return nil, errors.Wrapf(err, "marshaling Schema2List %#v", list)
+ return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err)
}
return buf, nil
}
@@ -190,7 +189,7 @@ func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
Manifests: []Schema2ManifestDescriptor{},
}
if err := json.Unmarshal(manifest, &list); err != nil {
- return nil, errors.Wrapf(err, "unmarshaling Schema2List %q", string(manifest))
+ return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
}
if err := validateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
allowedFieldManifests); err != nil {
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index 11927ab5e..fc325009c 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -12,7 +12,6 @@ import (
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
@@ -124,7 +123,7 @@ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
// handled — that logic should eventually also be provided as OCI1 methods, not hard-coded in callers.
func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.Layers) != len(layerInfos) {
- return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
}
original := m.Layers
m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
@@ -139,7 +138,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
}
mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info)
if err != nil {
- return errors.Wrapf(err, "preparing updated manifest, layer %q", info.Digest)
+ return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
}
if info.CryptoOperation == types.Encrypt {
encMediaType, err := getEncryptedMediaType(mimeType)
@@ -163,7 +162,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
func getEncryptedMediaType(mediatype string) (string, error) {
for _, s := range strings.Split(mediatype, "+")[1:] {
if s == "encrypted" {
- return "", errors.Errorf("unsupportedmediatype: %v already encrypted", mediatype)
+ return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
}
}
unsuffixedMediatype := strings.Split(mediatype, "+")[0]
@@ -172,14 +171,14 @@ func getEncryptedMediaType(mediatype string) (string, error) {
return mediatype + "+encrypted", nil
}
- return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
+ return "", fmt.Errorf("unsupported mediaType to encrypt: %v", mediatype)
}
// getDecryptedMediaType will return the mediatype of its decrypted counterpart and return
// an error if the mediatype does not support decryption
func getDecryptedMediaType(mediatype string) (string, error) {
if !strings.HasSuffix(mediatype, "+encrypted") {
- return "", errors.Errorf("unsupported mediatype to decrypt %v:", mediatype)
+ return "", fmt.Errorf("unsupported mediaType to decrypt %v:", mediatype)
}
return strings.TrimSuffix(mediatype, "+encrypted"), nil
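
The two unexported helpers implement a plain "+encrypted" media-type suffix convention; a runnable illustration (the helpers themselves are internal and not importable):

    package main

    import (
    	"fmt"
    	"strings"

    	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
    	base := imgspecv1.MediaTypeImageLayerGzip // "application/vnd.oci.image.layer.v1.tar+gzip"
    	enc := base + "+encrypted"                // the encryption direction appends the suffix
    	fmt.Println(strings.TrimSuffix(enc, "+encrypted") == base) // true; the decryption direction strips it
    }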
diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go
index c4f11e09c..726207b9d 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci_index.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go
@@ -10,7 +10,6 @@ import (
"github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// OCI1Index is just an alias for the OCI index type, but one which we can
@@ -44,26 +43,26 @@ func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, erro
}, nil
}
}
- return ListUpdate{}, errors.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
+ return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(index.Manifests) {
- return errors.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
+ return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
- return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest", i+1, len(updates))
+ return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
index.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
- return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
+ return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
index.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
- return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
+ return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
}
index.Manifests[i].MediaType = updates[i].MediaType
}
@@ -75,7 +74,7 @@ func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
- return "", errors.Wrapf(err, "getting platform information %#v", ctx)
+ return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range index.Manifests {
@@ -108,7 +107,7 @@ func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest,
func (index *OCI1Index) Serialize() ([]byte, error) {
buf, err := json.Marshal(index)
if err != nil {
- return nil, errors.Wrapf(err, "marshaling OCI1Index %#v", index)
+ return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err)
}
return buf, nil
}
@@ -202,7 +201,7 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
},
}
if err := json.Unmarshal(manifest, &index); err != nil {
- return nil, errors.Wrapf(err, "unmarshaling OCI1Index %q", string(manifest))
+ return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err)
}
if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
allowedFieldManifests); err != nil {
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
index 3d8738db5..f710be10b 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
@@ -2,38 +2,49 @@ package archive
import (
"context"
+ "fmt"
"io"
"os"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type ociArchiveImageDestination struct {
+ impl.Compat
+
ref ociArchiveReference
- unpackedDest types.ImageDestination
+ unpackedDest private.ImageDestination
tempDirRef tempDirOCIRef
}
// newImageDestination returns an ImageDestination for writing to an existing directory.
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) {
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (private.ImageDestination, error) {
tempDirRef, err := createOCIRef(sys, ref.image)
if err != nil {
- return nil, errors.Wrapf(err, "creating oci reference")
+ return nil, fmt.Errorf("creating oci reference: %w", err)
}
unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys)
if err != nil {
if err := tempDirRef.deleteTempDir(); err != nil {
- return nil, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory)
+ return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
}
return nil, err
}
- return &ociArchiveImageDestination{ref: ref,
- unpackedDest: unpackedDest,
- tempDirRef: tempDirRef}, nil
+ d := &ociArchiveImageDestination{
+ ref: ref,
+ unpackedDest: imagedestination.FromPublic(unpackedDest),
+ tempDirRef: tempDirRef,
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
}
// Reference returns the reference used to set up this destination.
@@ -87,29 +98,40 @@ func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
return false
}
-// PutBlob writes contents of stream and returns data representing the result.
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
+ return d.unpackedDest.SupportsPutBlobPartial()
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ return d.unpackedDest.PutBlobWithOptions(ctx, stream, inputInfo, options)
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+ return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
+func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+ return d.unpackedDest.TryReusingBlobWithOptions(ctx, info, options)
}
// PutManifest writes the manifest to the destination.
@@ -121,11 +143,12 @@ func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte,
return d.unpackedDest.PutManifest(ctx, m, instanceDigest)
}
-// PutSignatures writes a set of signatures to the destination.
+// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- return d.unpackedDest.PutSignatures(ctx, signatures, instanceDigest)
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *ociArchiveImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ return d.unpackedDest.PutSignaturesWithFormat(ctx, signatures, instanceDigest)
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted
@@ -135,7 +158,7 @@ func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatur
// after the directory is made, it is tarred up into a file and the directory is deleted
func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil {
- return errors.Wrapf(err, "storing image %q", d.ref.image)
+ return fmt.Errorf("storing image %q: %w", d.ref.image, err)
}
// path of directory to tar up
@@ -150,13 +173,13 @@ func tarDirectory(src, dst string) error {
// input is a stream of bytes from the archive of the directory at path
input, err := archive.Tar(src, archive.Uncompressed)
if err != nil {
- return errors.Wrapf(err, "retrieving stream of bytes from %q", src)
+ return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err)
}
// creates the tar file
outFile, err := os.Create(dst)
if err != nil {
- return errors.Wrapf(err, "creating tar file %q", dst)
+ return fmt.Errorf("creating tar file %q: %w", dst, err)
}
defer outFile.Close()
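
The construction order in newImageDestination above matters: impl.AddCompat wires the embedded Compat back to the destination itself, so the legacy types.ImageDestination entry points (PutBlob, TryReusingBlob, PutSignatures) are synthesized from the private.ImageDestination methods. A hedged restatement of the pattern (not compilable on its own):

    d := &ociArchiveImageDestination{
    	ref:          ref,
    	unpackedDest: imagedestination.FromPublic(unpackedDest),
    	tempDirRef:   tempDirRef,
    }
    d.Compat = impl.AddCompat(d) // must run after d is otherwise fully constructed
    return d, nil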
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
index 20b392dc0..e5ad2570e 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
@@ -2,40 +2,51 @@ package archive
import (
"context"
+ "errors"
+ "fmt"
"io"
+ "github.com/containers/image/v5/internal/imagesource"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
ocilayout "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type ociArchiveImageSource struct {
+ impl.Compat
+
ref ociArchiveReference
- unpackedSrc types.ImageSource
+ unpackedSrc private.ImageSource
tempDirRef tempDirOCIRef
}
// newImageSource returns an ImageSource for reading from an existing directory.
// newImageSource untars the file and saves it in a temp directory
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) {
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (private.ImageSource, error) {
tempDirRef, err := createUntarTempDir(sys, ref)
if err != nil {
- return nil, errors.Wrap(err, "creating temp directory")
+ return nil, fmt.Errorf("creating temp directory: %w", err)
}
unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys)
if err != nil {
if err := tempDirRef.deleteTempDir(); err != nil {
- return nil, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory)
+ return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
}
return nil, err
}
- return &ociArchiveImageSource{ref: ref,
- unpackedSrc: unpackedSrc,
- tempDirRef: tempDirRef}, nil
+ s := &ociArchiveImageSource{
+ ref: ref,
+ unpackedSrc: imagesource.FromPublic(unpackedSrc),
+ tempDirRef: tempDirRef,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
}
// LoadManifestDescriptor loads the manifest
@@ -48,11 +59,11 @@ func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor,
func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
ociArchRef, ok := imgRef.(ociArchiveReference)
if !ok {
- return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference")
+ return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociArchiveReference")
}
tempDirRef, err := createUntarTempDir(sys, ociArchRef)
if err != nil {
- return imgspecv1.Descriptor{}, errors.Wrap(err, "creating temp directory")
+ return imgspecv1.Descriptor{}, fmt.Errorf("creating temp directory: %w", err)
}
defer func() {
err := tempDirRef.deleteTempDir()
@@ -61,7 +72,7 @@ func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.Im
descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted)
if err != nil {
- return imgspecv1.Descriptor{}, errors.Wrap(err, "loading index")
+ return imgspecv1.Descriptor{}, fmt.Errorf("loading index: %w", err)
}
return descriptor, nil
}
@@ -101,12 +112,26 @@ func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo
return s.unpackedSrc.GetBlob(ctx, info, cache)
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (s *ociArchiveImageSource) SupportsGetBlobAt() bool {
+ return s.unpackedSrc.SupportsGetBlobAt()
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap, and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *ociArchiveImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ return s.unpackedSrc.GetBlobAt(ctx, info, chunks)
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
-func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- return s.unpackedSrc.GetSignatures(ctx, instanceDigest)
+func (s *ociArchiveImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ return s.unpackedSrc.GetSignaturesWithFormat(ctx, instanceDigest)
}
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
index 74fefbd4f..53371796f 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
@@ -2,6 +2,7 @@ package archive
import (
"context"
+ "errors"
"fmt"
"os"
"strings"
@@ -15,7 +16,6 @@ import (
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
- "github.com/pkg/errors"
)
func init() {
@@ -139,7 +139,7 @@ func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *typ
// DeleteImage deletes the named image from the registry, if supported.
func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for oci: images")
+ return errors.New("Deleting images not implemented for oci: images")
}
// struct to store the ociReference and temporary directory returned by createOCIRef
@@ -158,7 +158,7 @@ func (t *tempDirOCIRef) deleteTempDir() error {
func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
if err != nil {
- return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory")
+ return tempDirOCIRef{}, fmt.Errorf("creating temp directory: %w", err)
}
ociRef, err := ocilayout.NewReference(dir, image)
if err != nil {
@@ -173,7 +173,7 @@ func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error)
func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
tempDirRef, err := createOCIRef(sys, ref.image)
if err != nil {
- return tempDirOCIRef{}, errors.Wrap(err, "creating oci reference")
+ return tempDirOCIRef{}, fmt.Errorf("creating oci reference: %w", err)
}
src := ref.resolvedFile
dst := tempDirRef.tempDirectory
@@ -185,9 +185,9 @@ func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (temp
defer arch.Close()
if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
if err := tempDirRef.deleteTempDir(); err != nil {
- return tempDirOCIRef{}, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory)
+ return tempDirOCIRef{}, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
}
- return tempDirOCIRef{}, errors.Wrapf(err, "untarring file %q", tempDirRef.tempDirectory)
+ return tempDirOCIRef{}, fmt.Errorf("untarring file %q: %w", tempDirRef.tempDirectory, err)
}
return tempDirRef, nil
}
diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
index c2012e50e..148bc12fa 100644
--- a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
+++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
@@ -1,7 +1,8 @@
package internal
import (
- "github.com/pkg/errors"
+ "errors"
+ "fmt"
"path/filepath"
"regexp"
"runtime"
@@ -27,7 +28,7 @@ func ValidateImageName(image string) error {
var err error
if !refRegexp.MatchString(image) {
- err = errors.Errorf("Invalid image %s", image)
+ err = fmt.Errorf("Invalid image %s", image)
}
return err
}
@@ -72,11 +73,11 @@ func ValidateOCIPath(path string) error {
if runtime.GOOS == "windows" {
// On Windows we must allow for a ':' as part of the path
if strings.Count(path, ":") > 1 {
- return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
+ return fmt.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
}
} else {
if strings.Contains(path, ":") {
- return errors.Errorf("Invalid OCI reference: path %s contains a colon", path)
+ return fmt.Errorf("Invalid OCI reference: path %s contains a colon", path)
}
}
return nil
@@ -96,7 +97,7 @@ func ValidateScope(scope string) error {
cleaned := filepath.Clean(scope)
if cleaned != scope {
- return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+ return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
return nil
@@ -105,7 +106,7 @@ func ValidateScope(scope string) error {
func validateScopeWindows(scope string) error {
matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
if !matched {
- return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
+ return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
}
return nil
@@ -113,7 +114,7 @@ func validateScopeWindows(scope string) error {
func validateScopeNonWindows(scope string) error {
if !strings.HasPrefix(scope, "/") {
- return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
+ return fmt.Errorf("Invalid scope %s: must be an absolute path", scope)
}
// Refuse also "/", otherwise "/" and "" would have the same semantics,
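
ValidateScope, shown above, rejects non-canonical scopes by round-tripping them through filepath.Clean and requiring the result to be unchanged. A small standalone illustration of that check (the sample paths are made up):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	for _, scope := range []string{"/var/lib/oci", "/var/lib/../lib/oci", "/var/lib/oci/"} {
		cleaned := filepath.Clean(scope)
		// Only the canonical spelling survives Clean unchanged; ".."
		// segments and trailing slashes fail the cleaned != scope test.
		fmt.Printf("%q ok=%v\n", scope, cleaned == scope)
	}
}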
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
index 77e8fd876..4face7213 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -3,29 +3,37 @@ package layout
import (
"context"
"encoding/json"
+ "errors"
+ "fmt"
"io"
"os"
"path/filepath"
"runtime"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
type ociImageDestination struct {
- ref ociReference
- index imgspecv1.Index
- sharedBlobDir string
- acceptUncompressedLayers bool
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.NoSignaturesInitialize
+
+ ref ociReference
+ index imgspecv1.Index
+ sharedBlobDir string
}
// newImageDestination returns an ImageDestination for writing to an existing directory.
-func newImageDestination(sys *types.SystemContext, ref ociReference) (types.ImageDestination, error) {
+func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) {
var index *imgspecv1.Index
if indexExists(ref) {
var err error
@@ -42,10 +50,32 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (types.Imag
}
}
- d := &ociImageDestination{ref: ref, index: *index}
+ desiredLayerCompression := types.Compress
+ if sys != nil && sys.OCIAcceptUncompressedLayers {
+ desiredLayerCompression = types.PreserveOriginal
+ }
+
+ d := &ociImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{
+ imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeImageIndex,
+ },
+ DesiredLayerCompression: desiredLayerCompression,
+ AcceptsForeignLayerURLs: true,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+ HasThreadSafePutBlob: true,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+ NoSignaturesInitialize: stubs.NoSignatures("Pushing signatures for OCI images is not supported"),
+
+ ref: ref,
+ index: *index,
+ }
+ d.Compat = impl.AddCompat(d)
if sys != nil {
d.sharedBlobDir = sys.OCISharedBlobDirPath
- d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers
}
if err := ensureDirectoryExists(d.ref.dir); err != nil {
@@ -71,58 +101,14 @@ func (d *ociImageDestination) Close() error {
return nil
}
-func (d *ociImageDestination) SupportedManifestMIMETypes() []string {
- return []string{
- imgspecv1.MediaTypeImageManifest,
- imgspecv1.MediaTypeImageIndex,
- }
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error {
- return errors.Errorf("Pushing signatures for OCI images is not supported")
-}
-
-func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression {
- if d.acceptUncompressedLayers {
- return types.PreserveOriginal
- }
- return types.Compress
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *ociImageDestination) AcceptsForeignLayerURLs() bool {
- return true
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *ociImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, DockerReference() returns nil.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *ociImageDestination) HasThreadSafePutBlob() bool {
- return true
-}
-
-// PutBlob writes contents of stream and returns data representing the result.
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
if err != nil {
return types.BlobInfo{}, err
@@ -146,7 +132,7 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+ return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
@@ -180,18 +166,16 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: blobDigest, Size: size}, nil
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
}
blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
if err != nil {
@@ -290,16 +274,6 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
d.index.Manifests = append(d.index.Manifests, *desc)
}
-// PutSignatures would add the given signatures to the oci layout (currently not supported).
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
-// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- if len(signatures) != 0 {
- return errors.Errorf("Pushing signatures for OCI images is not supported")
- }
- return nil
-}
-
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
index 8973f461c..b2d963b01 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -2,22 +2,32 @@ package layout
import (
"context"
+ "errors"
+ "fmt"
"io"
"net/http"
"net/url"
"os"
"strconv"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/docker/go-connections/tlsconfig"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
type ociImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
ref ociReference
index *imgspecv1.Index
descriptor imgspecv1.Descriptor
@@ -26,7 +36,7 @@ type ociImageSource struct {
}
// newImageSource returns an ImageSource for reading from an existing directory.
-func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSource, error) {
+func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSource, error) {
tr := tlsclientconfig.NewTransport()
tr.TLSClientConfig = tlsconfig.ServerDefault()
@@ -47,12 +57,23 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSour
if err != nil {
return nil, err
}
- d := &ociImageSource{ref: ref, index: index, descriptor: descriptor, client: client}
+ s := &ociImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ index: index,
+ descriptor: descriptor,
+ client: client,
+ }
if sys != nil {
// TODO(jonboulle): check dir existence?
- d.sharedBlobDir = sys.OCISharedBlobDirPath
+ s.sharedBlobDir = sys.OCISharedBlobDirPath
}
- return d, nil
+ s.Compat = impl.AddCompat(s)
+ return s, nil
}
// Reference returns the reference used to set up this source.
@@ -103,11 +124,6 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, mimeType, nil
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *ociImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -137,14 +153,6 @@ func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
return r, fi.Size(), nil
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- return [][]byte{}, nil
-}
-
// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty.
// This function can return nil reader when no url is supported by this function. In this case, the caller
// should fallback to fetch the non-external blob (i.e. pull from the registry).
@@ -162,19 +170,19 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io
hasSupportedURL = true
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
if err != nil {
- errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", u, err.Error())
+ errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
continue
}
resp, err := s.client.Do(req)
if err != nil {
- errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", u, err.Error())
+ errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
continue
}
if resp.StatusCode != http.StatusOK {
resp.Body.Close()
- errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", u)
+ errWrap = fmt.Errorf("fetching %s failed, response code not 200: %w", u, errWrap)
continue
}
@@ -187,18 +195,6 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io
return nil, 0, errWrap
}
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *ociImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
-
func getBlobSize(resp *http.Response) int64 {
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
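
In the getExternalBlob hunk above, each per-URL failure now wraps the running aggregate with %w, so the final error reports every attempted URL in order. A standalone sketch of that accumulation pattern, with made-up URLs and a stub fetch:

package main

import (
	"errors"
	"fmt"
)

func fetchAll(urls []string) error {
	errWrap := errors.New("no candidate URL succeeded")
	for _, u := range urls {
		if err := fetch(u); err != nil {
			// Each failure wraps the previous aggregate, so the whole
			// history stays attached to the returned error.
			errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
			continue
		}
		return nil
	}
	return errWrap
}

func fetch(u string) error { return errors.New("connection refused") } // stand-in

func main() {
	fmt.Println(fetchAll([]string{"https://a.example/blob", "https://b.example/blob"}))
}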
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
index a9029a609..be22bed6d 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
@@ -3,6 +3,7 @@ package layout
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -16,7 +17,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
func init() {
@@ -215,7 +215,7 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
ociRef, ok := imgRef.(ociReference)
if !ok {
- return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef")
+ return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef")
}
return ociRef.getManifestDescriptor()
}
@@ -234,7 +234,7 @@ func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.Syst
// DeleteImage deletes the named image from the registry, if supported.
func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for oci: images")
+ return errors.New("Deleting images not implemented for oci: images")
}
// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions.
@@ -250,7 +250,7 @@ func (ref ociReference) indexPath() string {
// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) {
if err := digest.Validate(); err != nil {
- return "", errors.Wrapf(err, "unexpected digest reference %s", digest)
+ return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err)
}
blobDir := filepath.Join(ref.dir, "blobs")
if sharedBlobDir != "" {
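
blobPath, shown above, validates the digest before mapping it to the blobs/<algorithm>/<encoded> layout of an OCI image directory, optionally redirecting to a shared blob directory. A sketch of the same logic using the public go-digest API (the directory values are examples):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/opencontainers/go-digest"
)

func blobPath(dir string, d digest.Digest, sharedBlobDir string) (string, error) {
	if err := d.Validate(); err != nil {
		return "", fmt.Errorf("unexpected digest reference %s: %w", d, err)
	}
	blobDir := filepath.Join(dir, "blobs")
	if sharedBlobDir != "" {
		blobDir = sharedBlobDir
	}
	// e.g. /tmp/layout/blobs/sha256/2cf2...
	return filepath.Join(blobDir, string(d.Algorithm()), d.Encoded()), nil
}

func main() {
	d := digest.FromString("hello")
	p, _ := blobPath("/tmp/layout", d, "")
	fmt.Println(p)
}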
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
index a6473ae68..8df1bfc8b 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
@@ -4,6 +4,7 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/json"
+ "errors"
"fmt"
"net"
"net/http"
@@ -18,7 +19,6 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/ghodss/yaml"
"github.com/imdario/mergo"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/http2"
)
@@ -354,19 +354,19 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err
if len(clusterInfo.Server) == 0 {
if len(clusterName) == 0 {
- validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined"))
+ validationErrors = append(validationErrors, errors.New("default cluster has no server defined"))
} else {
- validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName))
+ validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName))
}
}
// Make sure CA data and CA file aren't both specified
if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
- validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
+ validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
}
if len(clusterInfo.CertificateAuthority) != 0 {
err := validateFileIsReadable(clusterInfo.CertificateAuthority)
if err != nil {
- validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
}
}
@@ -390,34 +390,34 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
// Make sure cert data and file aren't both specified
if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
- validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
+ validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
}
// Make sure key data and file aren't both specified
if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
- validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
+ validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
}
// Make sure a key is specified
if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
- validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
+ validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
}
if len(authInfo.ClientCertificate) != 0 {
err := validateFileIsReadable(authInfo.ClientCertificate)
if err != nil {
- validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
}
}
if len(authInfo.ClientKey) != 0 {
err := validateFileIsReadable(authInfo.ClientKey)
if err != nil {
- validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
}
}
}
// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
if (len(methods) > 1) && (!usingAuthPath) {
- validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
+ validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
}
return validationErrors
@@ -578,7 +578,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
continue
}
if err != nil {
- errlist = append(errlist, errors.Wrapf(err, "loading config file \"%s\"", filename))
+ errlist = append(errlist, fmt.Errorf("loading config file \"%s\": %w", filename, err))
continue
}
@@ -691,7 +691,7 @@ func resolveLocalPaths(config *clientcmdConfig) error {
}
base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
if err != nil {
- return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
+ return fmt.Errorf("Could not determine the absolute path of config file %s: %w", cluster.LocationOfOrigin, err)
}
if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
@@ -704,7 +704,7 @@ func resolveLocalPaths(config *clientcmdConfig) error {
}
base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
if err != nil {
- return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
+ return fmt.Errorf("Could not determine the absolute path of config file %s: %w", authInfo.LocationOfOrigin, err)
}
if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
@@ -774,7 +774,7 @@ func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
// Kubernetes API.
func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
if host == "" {
- return nil, errors.Errorf("host must be a URL or a host:port pair")
+ return nil, errors.New("host must be a URL or a host:port pair")
}
base := host
hostURL, err := url.Parse(base)
@@ -791,7 +791,7 @@ func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
return nil, err
}
if hostURL.Path != "" && hostURL.Path != "/" {
- return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
+ return nil, fmt.Errorf("host must be a URL or a host:port pair: %q", base)
}
}
@@ -861,7 +861,7 @@ func transportNew(config *restConfig) (http.RoundTripper, error) {
// REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains.
if len(config.Username) != 0 && len(config.BearerToken) != 0 {
- return nil, errors.Errorf("username/password or bearer token may be set, but not both")
+ return nil, errors.New("username/password or bearer token may be set, but not both")
}
return rt, nil
@@ -954,7 +954,7 @@ func tlsConfigFor(c *restConfig) (*tls.Config, error) {
return nil, nil
}
if c.HasCA() && c.Insecure {
- return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed")
+ return nil, errors.New("specifying a root certificates file with the insecure flag is not allowed")
}
if err := loadTLSFiles(c); err != nil {
return nil, err
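
The kubeconfig validation helpers above collect every problem into a []error instead of failing on the first one, which is why each branch appends to validationErrors. A condensed sketch of that style; the cluster struct and its fields are illustrative, not the patch's types:

package main

import (
	"errors"
	"fmt"
)

type cluster struct {
	name, server, caFile string
	caData               []byte
}

func validate(c cluster) []error {
	var errs []error
	if c.server == "" {
		errs = append(errs, fmt.Errorf("no server found for cluster %q", c.name))
	}
	if c.caFile != "" && len(c.caData) != 0 {
		errs = append(errs, errors.New("certificate-authority-data and certificate-authority are both specified; data will override"))
	}
	return errs // caller reports all findings at once
}

func main() {
	for _, e := range validate(cluster{name: "dev", caFile: "/etc/ca.crt", caData: []byte("x")}) {
		fmt.Println(e)
	}
}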
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go
index 67612d800..b2e4dfd9e 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift.go
@@ -3,22 +3,17 @@ package openshift
import (
"bytes"
"context"
- "crypto/rand"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
- "github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/types"
"github.com/containers/image/v5/version"
- "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -126,7 +121,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
if statusValid {
return nil, errors.New(status.Message)
}
- return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body))
+ return nil, fmt.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body))
}
return body, nil
@@ -153,368 +148,11 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
parts := strings.SplitN(ref, "/", 2)
if len(parts) != 2 {
- return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref)
+ return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
}
return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil
}
-type openshiftImageSource struct {
- client *openshiftClient
- // Values specific to this image
- sys *types.SystemContext
- // State
- docker types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet
- imageStreamImageName string // Resolved image identifier, or "" if not known yet
-}
-
-// newImageSource creates a new ImageSource for the specified reference.
-// The caller must call .Close() on the returned ImageSource.
-func newImageSource(sys *types.SystemContext, ref openshiftReference) (types.ImageSource, error) {
- client, err := newOpenshiftClient(ref)
- if err != nil {
- return nil, err
- }
-
- return &openshiftImageSource{
- client: client,
- sys: sys,
- }, nil
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (s *openshiftImageSource) Reference() types.ImageReference {
- return s.client.ref
-}
-
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *openshiftImageSource) Close() error {
- if s.docker != nil {
- err := s.docker.Close()
- s.docker = nil
-
- return err
- }
-
- return nil
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
-// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
-func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
- if err := s.ensureImageIsResolved(ctx); err != nil {
- return nil, "", err
- }
- return s.docker.GetManifest(ctx, instanceDigest)
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *openshiftImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
- if err := s.ensureImageIsResolved(ctx); err != nil {
- return nil, 0, err
- }
- return s.docker.GetBlob(ctx, info, cache)
-}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- var imageStreamImageName string
- if instanceDigest == nil {
- if err := s.ensureImageIsResolved(ctx); err != nil {
- return nil, err
- }
- imageStreamImageName = s.imageStreamImageName
- } else {
- imageStreamImageName = instanceDigest.String()
- }
- image, err := s.client.getImage(ctx, imageStreamImageName)
- if err != nil {
- return nil, err
- }
- var sigs [][]byte
- for _, sig := range image.Signatures {
- if sig.Type == imageSignatureTypeAtomic {
- sigs = append(sigs, sig.Content)
- }
- }
- return sigs, nil
-}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
-
-// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
-func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
- if s.docker != nil {
- return nil
- }
-
- // FIXME: validate components per validation.IsValidPathSegmentName?
- path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
- body, err := s.client.doRequest(ctx, http.MethodGet, path, nil)
- if err != nil {
- return err
- }
- // Note: This does absolutely no kind/version checking or conversions.
- var is imageStream
- if err := json.Unmarshal(body, &is); err != nil {
- return err
- }
- var te *tagEvent
- for _, tag := range is.Status.Tags {
- if tag.Tag != s.client.ref.dockerReference.Tag() {
- continue
- }
- if len(tag.Items) > 0 {
- te = &tag.Items[0]
- break
- }
- }
- if te == nil {
- return errors.Errorf("No matching tag found")
- }
- logrus.Debugf("tag event %#v", te)
- dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
- if err != nil {
- return err
- }
- logrus.Debugf("Resolved reference %#v", dockerRefString)
- dockerRef, err := docker.ParseReference("//" + dockerRefString)
- if err != nil {
- return err
- }
- d, err := dockerRef.NewImageSource(ctx, s.sys)
- if err != nil {
- return err
- }
- s.docker = d
- s.imageStreamImageName = te.Image
- return nil
-}
-
-type openshiftImageDestination struct {
- client *openshiftClient
- docker types.ImageDestination // The docker/distribution API endpoint
- // State
- imageStreamImageName string // "" if not yet known
-}
-
-// newImageDestination creates a new ImageDestination for the specified reference.
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) {
- client, err := newOpenshiftClient(ref)
- if err != nil {
- return nil, err
- }
-
- // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
- // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
- // the manifest digest at this point.
- dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
- dockerRef, err := docker.ParseReference(dockerRefString)
- if err != nil {
- return nil, err
- }
- docker, err := dockerRef.NewImageDestination(ctx, sys)
- if err != nil {
- return nil, err
- }
-
- return &openshiftImageDestination{
- client: client,
- docker: docker,
- }, nil
-}
-
-// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (d *openshiftImageDestination) Reference() types.ImageReference {
- return d.client.ref
-}
-
-// Close removes resources associated with an initialized ImageDestination, if any.
-func (d *openshiftImageDestination) Close() error {
- return d.docker.Close()
-}
-
-func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
- return d.docker.SupportedManifestMIMETypes()
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *openshiftImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.Compress
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
- return true
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
- return d.docker.IgnoresEmbeddedDockerReference()
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
-// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
-}
-
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
-// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
-func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
- if instanceDigest == nil {
- manifestDigest, err := manifest.Digest(m)
- if err != nil {
- return err
- }
- d.imageStreamImageName = manifestDigest.String()
- }
- return d.docker.PutManifest(ctx, m, instanceDigest)
-}
-
-func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- var imageStreamImageName string
- if instanceDigest == nil {
- if d.imageStreamImageName == "" {
- return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
- }
- imageStreamImageName = d.imageStreamImageName
- } else {
- imageStreamImageName = instanceDigest.String()
- }
-
- // Because image signatures are a shared resource in Atomic Registry, the default upload
- // always adds signatures. Eventually we should also allow removing signatures.
-
- if len(signatures) == 0 {
- return nil // No need to even read the old state.
- }
-
- image, err := d.client.getImage(ctx, imageStreamImageName)
- if err != nil {
- return err
- }
- existingSigNames := map[string]struct{}{}
- for _, sig := range image.Signatures {
- existingSigNames[sig.objectMeta.Name] = struct{}{}
- }
-
-sigExists:
- for _, newSig := range signatures {
- for _, existingSig := range image.Signatures {
- if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
- continue sigExists
- }
- }
-
- // The API expect us to invent a new unique name. This is racy, but hopefully good enough.
- var signatureName string
- for {
- randBytes := make([]byte, 16)
- n, err := rand.Read(randBytes)
- if err != nil || n != 16 {
- return errors.Wrapf(err, "generating random signature len %d", n)
- }
- signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
- if _, ok := existingSigNames[signatureName]; !ok {
- break
- }
- }
- // Note: This does absolutely no kind/version checking or conversions.
- sig := imageSignature{
- typeMeta: typeMeta{
- Kind: "ImageSignature",
- APIVersion: "v1",
- },
- objectMeta: objectMeta{Name: signatureName},
- Type: imageSignatureTypeAtomic,
- Content: newSig,
- }
- body, err := json.Marshal(sig)
- if err != nil {
- return err
- }
- _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-// original manifest list digest, if desired.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
- return d.docker.Commit(ctx, unparsedToplevel)
-}
-
// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
type imageStream struct {
Status imageStreamStatus `json:"status,omitempty"`
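
Besides the error migration, the diff above splits openshift.go: the source and destination implementations move into the new openshift_src.go and openshift_dest.go files below, leaving only the client and API structs behind. The surviving convertDockerImageReference rewrites the registry host of a pullspec to the domain from the user's reference. A sketch of that string surgery with example values (convertRef and the pullspecs are illustrative):

package main

import (
	"fmt"
	"strings"
)

func convertRef(ref, userDomain string) (string, error) {
	parts := strings.SplitN(ref, "/", 2)
	if len(parts) != 2 {
		return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
	}
	// Keep the repository path, swap in the domain the user actually spoke to.
	return userDomain + "/" + parts[1], nil
}

func main() {
	out, _ := convertRef("172.30.0.1:5000/ns/stream@sha256:abc", "registry.example.com")
	fmt.Println(out) // registry.example.com/ns/stream@sha256:abc
}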
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go
new file mode 100644
index 000000000..d5dbaf27e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go
@@ -0,0 +1,247 @@
+package openshift
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+type openshiftImageDestination struct {
+ impl.Compat
+ stubs.AlwaysSupportsSignatures
+
+ client *openshiftClient
+ docker private.ImageDestination // The docker/distribution API endpoint
+ // State
+ imageStreamImageName string // "" if not yet known
+}
+
+// newImageDestination creates a new ImageDestination for the specified reference.
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (private.ImageDestination, error) {
+ client, err := newOpenshiftClient(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
+ // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
+ // the manifest digest at this point.
+ dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
+ dockerRef, err := docker.ParseReference(dockerRefString)
+ if err != nil {
+ return nil, err
+ }
+ docker, err := dockerRef.NewImageDestination(ctx, sys)
+ if err != nil {
+ return nil, err
+ }
+
+ d := &openshiftImageDestination{
+ client: client,
+ docker: imagedestination.FromPublic(docker),
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *openshiftImageDestination) Reference() types.ImageReference {
+ return d.client.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *openshiftImageDestination) Close() error {
+ return d.docker.Close()
+}
+
+func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
+ return d.docker.SupportedManifestMIMETypes()
+}
+
+func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
+ return types.Compress
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
+ return true
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
+ return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
+ return d.docker.IgnoresEmbeddedDockerReference()
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
+ return false
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
+ return d.docker.SupportsPutBlobPartial()
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ return d.docker.PutBlobWithOptions(ctx, stream, inputInfo, options)
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+ return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+ return d.docker.TryReusingBlobWithOptions(ctx, info, options)
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
+func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ if instanceDigest == nil {
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ d.imageStreamImageName = manifestDigest.String()
+ }
+ return d.docker.PutManifest(ctx, m, instanceDigest)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ var imageStreamImageName string
+ if instanceDigest == nil {
+ if d.imageStreamImageName == "" {
+ return errors.New("Internal error: Unknown manifest digest, can't add signatures")
+ }
+ imageStreamImageName = d.imageStreamImageName
+ } else {
+ imageStreamImageName = instanceDigest.String()
+ }
+
+ // Because image signatures are a shared resource in Atomic Registry, the default upload
+ // always adds signatures. Eventually we should also allow removing signatures.
+
+ if len(signatures) == 0 {
+ return nil // No need to even read the old state.
+ }
+
+ image, err := d.client.getImage(ctx, imageStreamImageName)
+ if err != nil {
+ return err
+ }
+ existingSigNames := map[string]struct{}{}
+ for _, sig := range image.Signatures {
+ existingSigNames[sig.objectMeta.Name] = struct{}{}
+ }
+
+sigExists:
+ for _, newSigWithFormat := range signatures {
+ newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
+ if !ok {
+ return signature.UnsupportedFormatError(newSigWithFormat)
+ }
+ newSig := newSigSimple.UntrustedSignature()
+
+ for _, existingSig := range image.Signatures {
+ if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+ continue sigExists
+ }
+ }
+
+ // The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+ var signatureName string
+ for {
+ randBytes := make([]byte, 16)
+ n, err := rand.Read(randBytes)
+ if err != nil || n != 16 {
+ return fmt.Errorf("generating random signature len %d: %w", n, err)
+ }
+ signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
+ if _, ok := existingSigNames[signatureName]; !ok {
+ break
+ }
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ sig := imageSignature{
+ typeMeta: typeMeta{
+ Kind: "ImageSignature",
+ APIVersion: "v1",
+ },
+ objectMeta: objectMeta{Name: signatureName},
+ Type: imageSignatureTypeAtomic,
+ Content: newSig,
+ }
+ body, err := json.Marshal(sig)
+ if err != nil {
+ return err
+ }
+ _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ return d.docker.Commit(ctx, unparsedToplevel)
+}
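
PutSignaturesWithFormat, added above, invents a unique signature name by appending 16 random bytes, hex-encoded to 32 digits, to the image name, retrying on collision against the names already in the image stream. A standalone sketch of just that naming loop (the existing-names set is illustrative):

package main

import (
	"crypto/rand"
	"fmt"
)

func newSignatureName(image string, existing map[string]struct{}) (string, error) {
	for {
		randBytes := make([]byte, 16)
		n, err := rand.Read(randBytes)
		if err != nil || n != 16 {
			return "", fmt.Errorf("generating random signature len %d: %w", n, err)
		}
		// 16 bytes -> 32 hex digits; %032x zero-pads to a fixed width.
		name := fmt.Sprintf("%s@%032x", image, randBytes)
		if _, ok := existing[name]; !ok {
			return name, nil // no collision; use this name
		}
	}
}

func main() {
	name, _ := newSignatureName("sha256:abc", map[string]struct{}{})
	fmt.Println(name)
}

As the upstream comment notes, the uniqueness check is racy against concurrent writers; the loop only guards against names already fetched into the local set.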
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_src.go b/vendor/github.com/containers/image/v5/openshift/openshift_src.go
new file mode 100644
index 000000000..93ba8d10e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_src.go
@@ -0,0 +1,173 @@
+package openshift
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+type openshiftImageSource struct {
+ impl.Compat
+ impl.DoesNotAffectLayerInfosForCopy
+ // This is slightly suboptimal. We could forward GetBlobAt(), but we need to call ensureImageIsResolved in SupportsGetBlobAt(),
+ // and that method doesn’t provide a context for timing out. That could actually be fixed (SupportsGetBlobAt is private and we
+ // can change it), but this is a deprecated transport anyway, so for now we just punt.
+ stubs.NoGetBlobAtInitialize
+
+ client *openshiftClient
+ // Values specific to this image
+ sys *types.SystemContext
+ // State
+ docker types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet
+ imageStreamImageName string // Resolved image identifier, or "" if not known yet
+}
+
+// newImageSource creates a new ImageSource for the specified reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(sys *types.SystemContext, ref openshiftReference) (private.ImageSource, error) {
+ client, err := newOpenshiftClient(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ s := &openshiftImageSource{
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ client: client,
+ sys: sys,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *openshiftImageSource) Reference() types.ImageReference {
+ return s.client.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *openshiftImageSource) Close() error {
+ if s.docker != nil {
+ err := s.docker.Close()
+ s.docker = nil
+
+ return err
+ }
+
+ return nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, "", err
+ }
+ return s.docker.GetManifest(ctx, instanceDigest)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *openshiftImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, 0, err
+ }
+ return s.docker.GetBlob(ctx, info, cache)
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *openshiftImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ var imageStreamImageName string
+ if instanceDigest == nil {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, err
+ }
+ imageStreamImageName = s.imageStreamImageName
+ } else {
+ imageStreamImageName = instanceDigest.String()
+ }
+ image, err := s.client.getImage(ctx, imageStreamImageName)
+ if err != nil {
+ return nil, err
+ }
+ var sigs []signature.Signature
+ for _, sig := range image.Signatures {
+ if sig.Type == imageSignatureTypeAtomic {
+ sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content))
+ }
+ }
+ return sigs, nil
+}
+
+// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
+func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
+ if s.docker != nil {
+ return nil
+ }
+
+ // FIXME: validate components per validation.IsValidPathSegmentName?
+ path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
+ body, err := s.client.doRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return err
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ var is imageStream
+ if err := json.Unmarshal(body, &is); err != nil {
+ return err
+ }
+ var te *tagEvent
+ for _, tag := range is.Status.Tags {
+ if tag.Tag != s.client.ref.dockerReference.Tag() {
+ continue
+ }
+ if len(tag.Items) > 0 {
+ te = &tag.Items[0]
+ break
+ }
+ }
+ if te == nil {
+ return errors.New("No matching tag found")
+ }
+ logrus.Debugf("tag event %#v", te)
+ dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Resolved reference %#v", dockerRefString)
+ dockerRef, err := docker.ParseReference("//" + dockerRefString)
+ if err != nil {
+ return err
+ }
+ d, err := dockerRef.NewImageSource(ctx, s.sys)
+ if err != nil {
+ return err
+ }
+ s.docker = d
+ s.imageStreamImageName = te.Image
+ return nil
+}
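This new source resolves the image stream lazily: every read path calls `ensureImageIsResolved`, which performs the API round trip once and caches the results in `s.docker` and `s.imageStreamImageName`. A generic sketch of that resolve-once idiom, with illustrative names rather than the transport's real types, and, like the original (`HasThreadSafeGetBlob` returns false), not safe for concurrent use:

```go
package main

import (
	"context"
	"fmt"
)

// lazySource caches an expensive resolution step, as openshiftImageSource
// does with its docker source and imageStreamImageName fields.
type lazySource struct {
	resolved string // "" means not resolved yet
}

func (s *lazySource) ensureResolved(ctx context.Context) error {
	if s.resolved != "" {
		return nil // already resolved; reuse the cached value
	}
	// Stand-in for the real work (an API round trip in the transport).
	s.resolved = "registry.example.com/ns/stream@sha256:..."
	return nil
}

func (s *lazySource) Manifest(ctx context.Context) (string, error) {
	if err := s.ensureResolved(ctx); err != nil {
		return "", err
	}
	return "manifest for " + s.resolved, nil
}

func main() {
	var s lazySource
	m, err := s.Manifest(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(m)
}
```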
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
index c8d65c78a..f7971a48f 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
@@ -2,6 +2,7 @@ package openshift
import (
"context"
+ "errors"
"fmt"
"regexp"
"strings"
@@ -11,7 +12,6 @@ import (
genericImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
func init() {
@@ -43,7 +43,7 @@ var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
// scope passed to this function will not be "", that value is always allowed.
func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error {
if scopeRegexp.FindStringIndex(scope) == nil {
- return errors.Errorf("Invalid scope name %s", scope)
+ return fmt.Errorf("Invalid scope name %s", scope)
}
return nil
}
@@ -59,11 +59,11 @@ type openshiftReference struct {
func ParseReference(ref string) (types.ImageReference, error) {
r, err := reference.ParseNormalizedNamed(ref)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse image reference %q", ref)
+ return nil, fmt.Errorf("failed to parse image reference %q: %w", ref, err)
}
tagged, ok := r.(reference.NamedTagged)
if !ok {
- return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref)
+ return nil, fmt.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref)
}
return NewReference(tagged)
}
@@ -72,7 +72,7 @@ func ParseReference(ref string) (types.ImageReference, error) {
func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
r := strings.SplitN(reference.Path(dockerRef), "/", 3)
if len(r) != 2 {
- return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'",
+ return nil, fmt.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'",
reference.FamiliarString(dockerRef))
}
return openshiftReference{
@@ -149,5 +149,5 @@ func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *type
// DeleteImage deletes the named image from the registry, if supported.
func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for atomic: images")
+ return errors.New("Deleting images not implemented for atomic: images")
}
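The hunks above are part of a mechanical migration from `github.com/pkg/errors` to the standard library: `errors.Errorf` becomes `fmt.Errorf`, and `errors.Wrapf(err, "msg", args...)` becomes `fmt.Errorf("msg: %w", args..., err)`. The `%w` verb keeps the cause reachable through `errors.Is`/`errors.As`, as this small sketch (with a hypothetical path) shows:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func openConfig(path string) error {
	if _, err := os.Open(path); err != nil {
		// Post-migration style: wrap with %w instead of errors.Wrapf.
		return fmt.Errorf("reading config %q: %w", path, err)
	}
	return nil
}

func main() {
	err := openConfig("/nonexistent/config.json")
	// The wrapped cause is still reachable through the error chain.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
}
```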
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
index 011118fa5..929523fa6 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -8,6 +8,7 @@ import (
"context"
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
"io"
"os"
@@ -20,7 +21,11 @@ import (
"time"
"unsafe"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
@@ -28,7 +33,6 @@ import (
"github.com/opencontainers/go-digest"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/ostreedev/ostree-go/pkg/otbuiltin"
- "github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
@@ -65,6 +69,11 @@ type manifestSchema struct {
}
type ostreeImageDestination struct {
+ compat impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.AlwaysSupportsSignatures
+
ref ostreeReference
manifest string
schema manifestSchema
@@ -76,12 +85,33 @@ type ostreeImageDestination struct {
}
// newImageDestination returns an ImageDestination for writing to an existing ostree.
-func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) {
+func newImageDestination(ref ostreeReference, tmpDirPath string) (private.ImageDestination, error) {
tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
if err := ensureDirectoryExists(tmpDirPath); err != nil {
return nil, err
}
- return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil
+ d := &ostreeImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{manifest.DockerV2Schema2MediaType},
+ DesiredLayerCompression: types.PreserveOriginal,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: true,
+ IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+ HasThreadSafePutBlob: false,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
+ ref: ref,
+ manifest: "",
+ schema: manifestSchema{},
+ tmpDirPath: tmpDirPath,
+ blobs: map[string]*blobToImport{},
+ digest: "",
+ signaturesLen: 0,
+ repo: nil,
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -98,55 +128,14 @@ func (d *ostreeImageDestination) Close() error {
return os.RemoveAll(d.tmpDirPath)
}
-func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string {
- return []string{
- manifest.DockerV2Schema2MediaType,
- }
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *ostreeImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
-func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.PreserveOriginal
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *ostreeImageDestination) MustMatchRuntimeOS() bool {
- return true
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, DockerReference() returns nil.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result.
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
if err != nil {
return types.BlobInfo{}, err
@@ -167,7 +156,7 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+ return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
@@ -213,7 +202,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm))
if int(res) < 0 && err != syscall.ENOENT {
- return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath)
+ return fmt.Errorf("cannot selabel_lookup_raw %s: %w", relPath, err)
}
if int(res) == 0 {
defer C.freecon(context)
@@ -221,7 +210,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
defer C.free(unsafe.Pointer(fullpathC))
res, err = C.lsetfilecon_raw(fullpathC, context)
if int(res) < 0 {
- return errors.Wrapf(err, "cannot setfilecon_raw %s to %s", fullpath, C.GoString(context))
+ return fmt.Errorf("cannot setfilecon_raw %s to %s: %w", fullpath, C.GoString(context), err)
}
}
}
@@ -338,16 +327,14 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {
@@ -411,10 +398,11 @@ func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob [
return os.WriteFile(manifestPath, manifestBlob, 0644)
}
-// PutSignatures writes signatures to the destination.
-// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
-// there can be no secondary manifests.
-func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *ostreeImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.New(`Manifest lists are not supported by "ostree:"`)
}
@@ -426,7 +414,11 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [
for i, sig := range signatures {
signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
- if err := os.WriteFile(signaturePath, sig, 0644); err != nil {
+ blob, err := signature.Blob(sig)
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(signaturePath, blob, 0644); err != nil {
return err
}
}
@@ -453,7 +445,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
if os.Getuid() == 0 && selinux.GetEnabled() {
selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
if selinuxHnd == nil {
- return errors.Wrapf(err, "cannot open the SELinux DB")
+ return fmt.Errorf("cannot open the SELinux DB: %w", err)
}
defer C.selabel_close(selinuxHnd)
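The embedded `impl.PropertyMethodsInitialize` and `stubs.*` types added to this destination replace the hand-written property getters deleted from this file above: a shared embedded type answers the property queries from a single `Properties` struct. The idiom, reduced to its essentials with illustrative names (not the internal package's actual API):

```go
package main

import "fmt"

// properties is the one struct a destination fills in.
type properties struct {
	MustMatchRuntimeOS   bool
	HasThreadSafePutBlob bool
}

// propertyMethods is embedded to supply the getter methods.
type propertyMethods struct{ p properties }

func (m propertyMethods) MustMatchRuntimeOS() bool   { return m.p.MustMatchRuntimeOS }
func (m propertyMethods) HasThreadSafePutBlob() bool { return m.p.HasThreadSafePutBlob }

// dest gets all the getters for free via embedding, instead of
// repeating one-line methods in every transport.
type dest struct {
	propertyMethods
}

func main() {
	d := dest{propertyMethods{properties{MustMatchRuntimeOS: true}}}
	fmt.Println(d.MustMatchRuntimeOS(), d.HasThreadSafePutBlob()) // true false
}
```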
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
index 1e1f2be03..9983acc0a 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
@@ -7,19 +7,23 @@ import (
"bytes"
"context"
"encoding/base64"
+ "errors"
"fmt"
"io"
"strconv"
"strings"
"unsafe"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/ioutils"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
- "github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
@@ -34,6 +38,10 @@ import (
import "C"
type ostreeImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoGetBlobAtInitialize
+
ref ostreeReference
tmpDir string
repo *C.struct_OstreeRepo
@@ -42,8 +50,19 @@ type ostreeImageSource struct {
}
// newImageSource returns an ImageSource for reading from an existing directory.
-func newImageSource(tmpDir string, ref ostreeReference) (types.ImageSource, error) {
- return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil
+func newImageSource(tmpDir string, ref ostreeReference) (private.ImageSource, error) {
+ s := &ostreeImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ tmpDir: tmpDir,
+ compressed: nil,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
}
// Reference returns the reference used to set up this source.
@@ -263,11 +282,6 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
return getter.Get(path)
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -339,10 +353,11 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
return rc, layerSize, nil
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as there can be no secondary manifests.
-func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ostreeImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
if instanceDigest != nil {
return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
}
@@ -360,18 +375,23 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d
s.repo = repo
}
- signatures := [][]byte{}
+ signatures := []signature.Signature{}
for i := int64(1); i <= lenSignatures; i++ {
- sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i))
+ path := fmt.Sprintf("/signature-%d", i)
+ sigReader, err := s.readSingleFile(branch, path)
if err != nil {
return nil, err
}
defer sigReader.Close()
- sig, err := os.ReadAll(sigReader)
+ sigBlob, err := io.ReadAll(sigReader)
if err != nil {
return nil, err
}
+ sig, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing signature %q: %w", path, err)
+ }
signatures = append(signatures, sig)
}
return signatures, nil
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
index 6c4262368..658d4e903 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
@@ -6,6 +6,7 @@ package ostree
import (
"bytes"
"context"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -17,7 +18,6 @@ import (
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
const defaultOSTreeRepo = "/ostree/repo"
@@ -42,16 +42,16 @@ func init() {
func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
sep := strings.Index(scope, ":")
if sep < 0 {
- return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
+ return fmt.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
}
repo := scope[:sep]
if !strings.HasPrefix(repo, "/") {
- return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
+ return fmt.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
}
cleaned := filepath.Clean(repo)
if cleaned != repo {
- return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+ return fmt.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
// FIXME? In the namespaces within a repo,
@@ -117,7 +117,7 @@ func NewReference(image string, repo string) (types.ImageReference, error) {
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
// from being ambiguous with values of PolicyConfigurationIdentity.
if strings.Contains(resolved, ":") {
- return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
+ return nil, fmt.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
}
return ostreeReference{
@@ -213,7 +213,7 @@ func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.S
// DeleteImage deletes the named image from the registry, if supported.
func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for ostree: images")
+ return errors.New("Deleting images not implemented for ostree: images")
}
var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)
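The scope validation above rejects scopes without a repo, relative repo paths, and non-canonical paths, so that policy-configuration identities stay unambiguous. A minimal sketch of the same three checks outside the transport:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// validateRepoScope applies the same checks as the ostree transport:
// a scope must include a repo, the repo must be an absolute path, and
// it must already be in canonical (filepath.Clean) form.
func validateRepoScope(scope string) error {
	sep := strings.Index(scope, ":")
	if sep < 0 {
		return fmt.Errorf("invalid scope %s: must include a repo", scope)
	}
	repo := scope[:sep]
	if !strings.HasPrefix(repo, "/") {
		return fmt.Errorf("invalid scope %s: repository must be an absolute path", scope)
	}
	if cleaned := filepath.Clean(repo); cleaned != repo {
		return fmt.Errorf("invalid scope %s: non-canonical path, try %s", scope, cleaned)
	}
	return nil
}

func main() {
	fmt.Println(validateRepoScope("/ostree/repo:busybox"))  // <nil>
	fmt.Println(validateRepoScope("/ostree//repo:busybox")) // non-canonical
}
```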
diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go
index c9971cbdc..2bbf48848 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go
@@ -1,31 +1,16 @@
package blobcache
import (
- "bytes"
"context"
- "io"
+ "fmt"
"os"
"path/filepath"
- "sync"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
- v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-var (
- _ types.ImageReference = &BlobCache{}
- _ types.ImageSource = &blobCacheSource{}
- _ types.ImageDestination = &blobCacheDestination{}
)
const (
@@ -46,29 +31,6 @@ type BlobCache struct {
compress types.LayerCompression
}
-type blobCacheSource struct {
- reference *BlobCache
- source types.ImageSource
- sys types.SystemContext
- // this mutex synchronizes the counters below
- mu sync.Mutex
- cacheHits int64
- cacheMisses int64
- cacheErrors int64
-}
-
-type blobCacheDestination struct {
- reference *BlobCache
- destination types.ImageDestination
-}
-
-func makeFilename(blobSum digest.Digest, isConfig bool) string {
- if isConfig {
- return blobSum.String() + ".config"
- }
- return blobSum.String()
-}
-
// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are
// written to the destination image created from the resulting reference will also be stored
// as-is to the specified directory or a temporary directory.
@@ -76,13 +38,13 @@ func makeFilename(blobSum digest.Digest, isConfig bool) string {
// or different version of a blob when preparing the list of layers when reading an image.
func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (*BlobCache, error) {
if directory == "" {
- return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref))
+ return nil, fmt.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref))
}
switch compress {
case types.Compress, types.Decompress, types.PreserveOriginal:
// valid value, accept it
default:
- return nil, errors.Errorf("unhandled LayerCompression value %v", compress)
+ return nil, fmt.Errorf("unhandled LayerCompression value %v", compress)
}
return &BlobCache{
reference: ref,
@@ -115,22 +77,46 @@ func (b *BlobCache) DeleteImage(ctx context.Context, sys *types.SystemContext) e
return b.reference.DeleteImage(ctx, sys)
}
-func (b *BlobCache) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
- if blobinfo.Digest == "" {
- return false, -1, nil
+// blobPath returns the path appropriate for storing a blob with digest.
+func (b *BlobCache) blobPath(digest digest.Digest, isConfig bool) string {
+ baseName := digest.String()
+ if isConfig {
+ baseName += ".config"
+ }
+ return filepath.Join(b.directory, baseName)
+}
+
+// findBlob checks if we have a blob for info in cache (whether a config or not),
+// and if so, returns its path and size, and whether it was stored as a config.
+// It returns ("", -1, false, nil) if the blob is not present in the cache.
+func (b *BlobCache) findBlob(info types.BlobInfo) (string, int64, bool, error) {
+ if info.Digest == "" {
+ return "", -1, false, nil
}
for _, isConfig := range []bool{false, true} {
- filename := filepath.Join(b.directory, makeFilename(blobinfo.Digest, isConfig))
- fileInfo, err := os.Stat(filename)
- if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) {
- return true, fileInfo.Size(), nil
+ path := b.blobPath(info.Digest, isConfig)
+ fileInfo, err := os.Stat(path)
+ if err == nil && (info.Size == -1 || info.Size == fileInfo.Size()) {
+ return path, fileInfo.Size(), isConfig, nil
}
if !os.IsNotExist(err) {
- return false, -1, errors.Wrap(err, "checking size")
+ return "", -1, false, fmt.Errorf("checking size: %w", err)
}
}
+ return "", -1, false, nil
+}
+
+func (b *BlobCache) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
+ path, size, _, err := b.findBlob(blobinfo)
+ if err != nil {
+ return false, -1, err
+ }
+ if path != "" {
+ return true, size, nil
+ }
return false, -1, nil
}
@@ -141,17 +127,17 @@ func (b *BlobCache) Directory() string {
func (b *BlobCache) ClearCache() error {
f, err := os.Open(b.directory)
if err != nil {
- return errors.WithStack(err)
+ return err
}
defer f.Close()
names, err := f.Readdirnames(-1)
if err != nil {
- return errors.Wrapf(err, "error reading directory %q", b.directory)
+ return fmt.Errorf("error reading directory %q: %w", b.directory, err)
}
for _, name := range names {
pathname := filepath.Join(b.directory, name)
if err = os.RemoveAll(pathname); err != nil {
- return errors.Wrapf(err, "clearing cache for %q", transports.ImageName(b))
+ return fmt.Errorf("clearing cache for %q: %w", transports.ImageName(b), err)
}
}
return nil
@@ -160,375 +146,3 @@ func (b *BlobCache) ClearCache() error {
func (b *BlobCache) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
return image.FromReference(ctx, sys, b)
}
-
-func (b *BlobCache) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
- src, err := b.reference.NewImageSource(ctx, sys)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(b.reference))
- }
- logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(b.reference), b.directory, b.compress)
- return &blobCacheSource{reference: b, source: src, sys: *sys}, nil
-}
-
-func (b *BlobCache) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
- dest, err := b.reference.NewImageDestination(ctx, sys)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating new image destination %q", transports.ImageName(b.reference))
- }
- logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(b.reference), b.directory)
- return &blobCacheDestination{reference: b, destination: dest}, nil
-}
-
-func (s *blobCacheSource) Reference() types.ImageReference {
- return s.reference
-}
-
-func (s *blobCacheSource) Close() error {
- logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors)
- return s.source.Close()
-}
-
-func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
- if instanceDigest != nil {
- filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false))
- manifestBytes, err := os.ReadFile(filename)
- if err == nil {
- s.cacheHits++
- return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil
- }
- if !os.IsNotExist(err) {
- s.cacheErrors++
- return nil, "", errors.Wrap(err, "checking for manifest file")
- }
- }
- s.cacheMisses++
- return s.source.GetManifest(ctx, instanceDigest)
-}
-
-func (s *blobCacheSource) HasThreadSafeGetBlob() bool {
- return s.source.HasThreadSafeGetBlob()
-}
-
-func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
- present, size, err := s.reference.HasBlob(blobinfo)
- if err != nil {
- return nil, -1, err
- }
- if present {
- for _, isConfig := range []bool{false, true} {
- filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig))
- f, err := os.Open(filename)
- if err == nil {
- s.mu.Lock()
- s.cacheHits++
- s.mu.Unlock()
- return f, size, nil
- }
- if !os.IsNotExist(err) {
- s.mu.Lock()
- s.cacheErrors++
- s.mu.Unlock()
- return nil, -1, errors.Wrap(err, "checking for cache")
- }
- }
- }
- s.mu.Lock()
- s.cacheMisses++
- s.mu.Unlock()
- rc, size, err := s.source.GetBlob(ctx, blobinfo, cache)
- if err != nil {
- return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference))
- }
- return rc, size, nil
-}
-
-func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- return s.source.GetSignatures(ctx, instanceDigest)
-}
-
-func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
- signatures, err := s.source.GetSignatures(ctx, instanceDigest)
- if err != nil {
- return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference))
- }
- canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0)
-
- infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest)
- if err != nil {
- return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference))
- }
- if infos == nil {
- img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest))
- if err != nil {
- return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference))
- }
- infos = img.LayerInfos()
- }
-
- if canReplaceBlobs && s.reference.compress != types.PreserveOriginal {
- replacedInfos := make([]types.BlobInfo, 0, len(infos))
- for _, info := range infos {
- var replaceDigest []byte
- var err error
- blobFile := filepath.Join(s.reference.directory, makeFilename(info.Digest, false))
- alternate := ""
- switch s.reference.compress {
- case types.Compress:
- alternate = blobFile + compressedNote
- replaceDigest, err = os.ReadFile(alternate)
- case types.Decompress:
- alternate = blobFile + decompressedNote
- replaceDigest, err = os.ReadFile(alternate)
- }
- if err == nil && digest.Digest(replaceDigest).Validate() == nil {
- alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false))
- fileInfo, err := os.Stat(alternate)
- if err == nil {
- switch info.MediaType {
- case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip:
- switch s.reference.compress {
- case types.Compress:
- info.MediaType = v1.MediaTypeImageLayerGzip
- info.CompressionAlgorithm = &compression.Gzip
- case types.Decompress:
- info.MediaType = v1.MediaTypeImageLayer
- info.CompressionAlgorithm = nil
- }
- case manifest.DockerV2SchemaLayerMediaTypeUncompressed, manifest.DockerV2Schema2LayerMediaType:
- switch s.reference.compress {
- case types.Compress:
- info.MediaType = manifest.DockerV2Schema2LayerMediaType
- info.CompressionAlgorithm = &compression.Gzip
- case types.Decompress:
- // nope, not going to suggest anything, it's not allowed by the spec
- replacedInfos = append(replacedInfos, info)
- continue
- }
- }
- logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String())
- info.CompressionOperation = s.reference.compress
- info.Digest = digest.Digest(replaceDigest)
- info.Size = fileInfo.Size()
- logrus.Debugf("info = %#v", info)
- }
- }
- replacedInfos = append(replacedInfos, info)
- }
- infos = replacedInfos
- }
-
- return infos, nil
-}
-
-func (d *blobCacheDestination) Reference() types.ImageReference {
- return d.reference
-}
-
-func (d *blobCacheDestination) Close() error {
- logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference))
- return d.destination.Close()
-}
-
-func (d *blobCacheDestination) SupportedManifestMIMETypes() []string {
- return d.destination.SupportedManifestMIMETypes()
-}
-
-func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error {
- return d.destination.SupportsSignatures(ctx)
-}
-
-func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression {
- return d.destination.DesiredLayerCompression()
-}
-
-func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool {
- return d.destination.AcceptsForeignLayerURLs()
-}
-
-func (d *blobCacheDestination) MustMatchRuntimeOS() bool {
- return d.destination.MustMatchRuntimeOS()
-}
-
-func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool {
- return d.destination.IgnoresEmbeddedDockerReference()
-}
-
-// Decompress and save the contents of the decompressReader stream into the passed-in temporary
-// file. If we successfully save all of the data, rename the file to match the digest of the data,
-// and make notes about the relationship between the file that holds a copy of the compressed data
-// and this new file.
-func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) {
- defer wg.Done()
- // Decompress from and digest the reading end of that pipe.
- decompressed, err3 := archive.DecompressStream(decompressReader)
- digester := digest.Canonical.Digester()
- if err3 == nil {
- // Read the decompressed data through the filter over the pipe, blocking until the
- // writing end is closed.
- _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
- } else {
- // Drain the pipe to keep from stalling the PutBlob() thread.
- if _, err := io.Copy(io.Discard, decompressReader); err != nil {
- logrus.Debugf("error draining the pipe: %v", err)
- }
- }
- decompressReader.Close()
- decompressed.Close()
- tempFile.Close()
- // Determine the name that we should give to the uncompressed copy of the blob.
- decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig))
- if err3 == nil {
- // Rename the temporary file.
- if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil {
- logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3)
- // Remove the temporary file.
- if err3 = os.Remove(tempFile.Name()); err3 != nil {
- logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
- }
- } else {
- *alternateDigest = digester.Digest()
- // Note the relationship between the two files.
- if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil {
- logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3)
- }
- if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil {
- logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3)
- }
- }
- } else {
- // Remove the temporary file.
- if err3 = os.Remove(tempFile.Name()); err3 != nil {
- logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
- }
- }
-}
-
-func (d *blobCacheDestination) HasThreadSafePutBlob() bool {
- return d.destination.HasThreadSafePutBlob()
-}
-
-func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- var tempfile *os.File
- var err error
- var n int
- var alternateDigest digest.Digest
- var closer io.Closer
- wg := new(sync.WaitGroup)
- needToWait := false
- compression := archive.Uncompressed
- if inputInfo.Digest != "" {
- filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
- tempfile, err = os.CreateTemp(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
- if err == nil {
- stream = io.TeeReader(stream, tempfile)
- defer func() {
- if err == nil {
- if err = os.Rename(tempfile.Name(), filename); err != nil {
- if err2 := os.Remove(tempfile.Name()); err2 != nil {
- logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2)
- }
- err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename)
- }
- } else {
- if err2 := os.Remove(tempfile.Name()); err2 != nil {
- logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2)
- }
- }
- tempfile.Close()
- }()
- } else {
- logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err)
- }
- if !isConfig {
- initial := make([]byte, 8)
- n, err = stream.Read(initial)
- if n > 0 {
- // Build a Reader that will still return the bytes that we just
- // read, for PutBlob()'s sake.
- stream = io.MultiReader(bytes.NewReader(initial[:n]), stream)
- if n >= len(initial) {
- compression = archive.DetectCompression(initial[:n])
- }
- if compression == archive.Gzip {
- // The stream is compressed, so create a file which we'll
- // use to store a decompressed copy.
- decompressedTemp, err2 := os.CreateTemp(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
- if err2 != nil {
- logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2)
- decompressedTemp.Close()
- } else {
- // Write a copy of the compressed data to a pipe,
- // closing the writing end of the pipe after
- // PutBlob() returns.
- decompressReader, decompressWriter := io.Pipe()
- closer = decompressWriter
- stream = io.TeeReader(stream, decompressWriter)
- // Let saveStream() close the reading end and handle the temporary file.
- wg.Add(1)
- needToWait = true
- go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest)
- }
- }
- }
- }
- }
- newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig)
- if closer != nil {
- closer.Close()
- }
- if needToWait {
- wg.Wait()
- }
- if err != nil {
- return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference))
- }
- if alternateDigest.Validate() == nil {
- logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory)
- } else {
- logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory)
- }
- return newBlobInfo, nil
-}
-
-func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute)
- if err != nil || present {
- return present, reusedInfo, err
- }
-
- for _, isConfig := range []bool{false, true} {
- filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig))
- f, err := os.Open(filename)
- if err == nil {
- defer f.Close()
- uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig)
- if err != nil {
- return false, types.BlobInfo{}, err
- }
- return true, uploadedInfo, nil
- }
- }
-
- return false, types.BlobInfo{}, nil
-}
-
-func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error {
- manifestDigest, err := manifest.Digest(manifestBytes)
- if err != nil {
- logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err)
- } else {
- filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false))
- if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil {
- logrus.Warnf("error saving manifest as %q: %v", filename, err)
- }
- }
- return d.destination.PutManifest(ctx, manifestBytes, instanceDigest)
-}
-
-func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- return d.destination.PutSignatures(ctx, signatures, instanceDigest)
-}
-
-func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
- return d.destination.Commit(ctx, unparsedToplevel)
-}
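This refactor replaces the free function `makeFilename` with the `blobPath` method and its `findBlob` helper; the on-disk layout is unchanged. A layer blob is stored under its digest string, a config blob under `<digest>.config`, and the compressed/decompressed sidecar notes add the `compressedNote`/`decompressedNote` suffixes. A sketch of that path scheme (the directory value is illustrative):

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/opencontainers/go-digest"
)

// blobPath mirrors BlobCache.blobPath: the digest string is the file
// name, with a ".config" suffix for config blobs.
func blobPath(dir string, d digest.Digest, isConfig bool) string {
	baseName := d.String()
	if isConfig {
		baseName += ".config"
	}
	return filepath.Join(dir, baseName)
}

func main() {
	d := digest.FromString("example layer") // yields a "sha256:..." digest
	fmt.Println(blobPath("/var/cache/blobs", d, false))
	fmt.Println(blobPath("/var/cache/blobs", d, true))
}
```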
diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go
new file mode 100644
index 000000000..c69eea6e3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go
@@ -0,0 +1,294 @@
+package blobcache
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+type blobCacheDestination struct {
+ impl.Compat
+
+ reference *BlobCache
+ destination private.ImageDestination
+}
+
+func (b *BlobCache) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ dest, err := b.reference.NewImageDestination(ctx, sys)
+ if err != nil {
+ return nil, fmt.Errorf("error creating new image destination %q: %w", transports.ImageName(b.reference), err)
+ }
+ logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(b.reference), b.directory)
+ d := &blobCacheDestination{reference: b, destination: imagedestination.FromPublic(dest)}
+ d.Compat = impl.AddCompat(d)
+ return d, nil
+}
+
+func (d *blobCacheDestination) Reference() types.ImageReference {
+ return d.reference
+}
+
+func (d *blobCacheDestination) Close() error {
+ logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference))
+ return d.destination.Close()
+}
+
+func (d *blobCacheDestination) SupportedManifestMIMETypes() []string {
+ return d.destination.SupportedManifestMIMETypes()
+}
+
+func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error {
+ return d.destination.SupportsSignatures(ctx)
+}
+
+func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression {
+ return d.destination.DesiredLayerCompression()
+}
+
+func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool {
+ return d.destination.AcceptsForeignLayerURLs()
+}
+
+func (d *blobCacheDestination) MustMatchRuntimeOS() bool {
+ return d.destination.MustMatchRuntimeOS()
+}
+
+func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool {
+ return d.destination.IgnoresEmbeddedDockerReference()
+}
+
+// Decompress and save the contents of the decompressReader stream into the passed-in temporary
+// file. If we successfully save all of the data, rename the file to match the digest of the data,
+// and make notes about the relationship between the file that holds a copy of the compressed data
+// and this new file.
+func (d *blobCacheDestination) saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) {
+ defer wg.Done()
+ // Decompress from and digest the reading end of that pipe.
+ decompressed, err3 := archive.DecompressStream(decompressReader)
+ digester := digest.Canonical.Digester()
+ if err3 == nil {
+ // Read the decompressed data through the filter over the pipe, blocking until the
+ // writing end is closed.
+ _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
+ } else {
+ // Drain the pipe to keep from stalling the PutBlob() thread.
+ if _, err := io.Copy(io.Discard, decompressReader); err != nil {
+ logrus.Debugf("error draining the pipe: %v", err)
+ }
+ }
+ decompressReader.Close()
+ decompressed.Close()
+ tempFile.Close()
+ // Determine the name that we should give to the uncompressed copy of the blob.
+ decompressedFilename := d.reference.blobPath(digester.Digest(), isConfig)
+ if err3 == nil {
+ // Rename the temporary file.
+ if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil {
+ logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3)
+ // Remove the temporary file.
+ if err3 = os.Remove(tempFile.Name()); err3 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
+ }
+ } else {
+ *alternateDigest = digester.Digest()
+ // Note the relationship between the two files.
+ if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil {
+ logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3)
+ }
+ if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil {
+ logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3)
+ }
+ }
+ } else {
+ // Remove the temporary file.
+ if err3 = os.Remove(tempFile.Name()); err3 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
+ }
+ }
+}
+
+func (d *blobCacheDestination) HasThreadSafePutBlob() bool {
+ return d.destination.HasThreadSafePutBlob()
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *blobCacheDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ var tempfile *os.File
+ var err error
+ var n int
+ var alternateDigest digest.Digest
+ var closer io.Closer
+ wg := new(sync.WaitGroup)
+ needToWait := false
+ compression := archive.Uncompressed
+ if inputInfo.Digest != "" {
+ filename := d.reference.blobPath(inputInfo.Digest, options.IsConfig)
+ tempfile, err = os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
+ if err == nil {
+ stream = io.TeeReader(stream, tempfile)
+ defer func() {
+ if err == nil {
+ if err = os.Rename(tempfile.Name(), filename); err != nil {
+ if err2 := os.Remove(tempfile.Name()); err2 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2)
+ }
+ err = fmt.Errorf("error renaming new layer for blob %q into place at %q: %w", inputInfo.Digest.String(), filename, err)
+ }
+ } else {
+ if err2 := os.Remove(tempfile.Name()); err2 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2)
+ }
+ }
+ tempfile.Close()
+ }()
+ } else {
+ logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", filepath.Dir(filename), inputInfo.Digest.String(), err)
+ }
+ if !options.IsConfig {
+ initial := make([]byte, 8)
+ n, err = stream.Read(initial)
+ if n > 0 {
+ // Build a Reader that will still return the bytes that we just
+ // read, for PutBlob()'s sake.
+ stream = io.MultiReader(bytes.NewReader(initial[:n]), stream)
+ if n >= len(initial) {
+ compression = archive.DetectCompression(initial[:n])
+ }
+ if compression == archive.Gzip {
+ // The stream is compressed, so create a file which we'll
+ // use to store a decompressed copy.
+ decompressedTemp, err2 := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
+ if err2 != nil {
+ logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", filepath.Dir(filename), inputInfo.Digest.String(), err2)
+ } else {
+ // Write a copy of the compressed data to a pipe,
+ // closing the writing end of the pipe after
+ // PutBlob() returns.
+ decompressReader, decompressWriter := io.Pipe()
+ closer = decompressWriter
+ stream = io.TeeReader(stream, decompressWriter)
+ // Let saveStream() close the reading end and handle the temporary file.
+ wg.Add(1)
+ needToWait = true
+ go d.saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, options.IsConfig, &alternateDigest)
+ }
+ }
+ }
+ }
+ }
+ newBlobInfo, err := d.destination.PutBlobWithOptions(ctx, stream, inputInfo, options)
+ if closer != nil {
+ closer.Close()
+ }
+ if needToWait {
+ wg.Wait()
+ }
+ if err != nil {
+ return newBlobInfo, fmt.Errorf("error storing blob to image destination for cache %q: %w", transports.ImageName(d.reference), err)
+ }
+ if alternateDigest.Validate() == nil {
+ logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory)
+ } else {
+ logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory)
+ }
+ return newBlobInfo, nil
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (d *blobCacheDestination) SupportsPutBlobPartial() bool {
+ return d.destination.SupportsPutBlobPartial()
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+ return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport cannot reuse the requested blob, TryReusingBlobWithOptions returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+ present, reusedInfo, err := d.destination.TryReusingBlobWithOptions(ctx, info, options)
+ if err != nil || present {
+ return present, reusedInfo, err
+ }
+
+ blobPath, _, isConfig, err := d.reference.findBlob(info)
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ if blobPath != "" {
+ f, err := os.Open(blobPath)
+ if err == nil {
+ defer f.Close()
+ uploadedInfo, err := d.destination.PutBlobWithOptions(ctx, f, info, private.PutBlobOptions{
+ Cache: options.Cache,
+ IsConfig: isConfig,
+ EmptyLayer: options.EmptyLayer,
+ LayerIndex: options.LayerIndex,
+ })
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ return true, uploadedInfo, nil
+ }
+ }
+
+ return false, types.BlobInfo{}, nil
+}
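
Note the three-way result documented above: a non-nil error is always an unexpected failure, while a simple miss is (false, {}, nil). A sketch of a hypothetical caller (copyLayer and upload are illustrative names):

func copyLayer(ctx context.Context, dest private.ImageDestination, info types.BlobInfo,
	options private.TryReusingBlobOptions, upload func() error) error {
	reused, reusedInfo, err := dest.TryReusingBlobWithOptions(ctx, info, options)
	if err != nil {
		return err // unexpected failure, not a cache miss
	}
	if reused {
		// reusedInfo carries at least Digest and Size; a CompressionOperation /
		// CompressionAlgorithm pair here must be reflected in the manifest.
		_ = reusedInfo
		return nil
	}
	return upload() // miss: stream the blob via PutBlobWithOptions instead
}
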
+
+func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error {
+ manifestDigest, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err)
+ } else {
+ filename := d.reference.blobPath(manifestDigest, false)
+ if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil {
+ logrus.Warnf("error saving manifest as %q: %v", filename, err)
+ }
+ }
+ return d.destination.PutManifest(ctx, manifestBytes, instanceDigest)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *blobCacheDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ return d.destination.PutSignaturesWithFormat(ctx, signatures, instanceDigest)
+}
+
+func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ return d.destination.Commit(ctx, unparsedToplevel)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/src.go b/vendor/github.com/containers/image/v5/pkg/blobcache/src.go
new file mode 100644
index 000000000..60677470f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobcache/src.go
@@ -0,0 +1,270 @@
+package blobcache
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/imagesource"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+type blobCacheSource struct {
+ impl.Compat
+
+ reference *BlobCache
+ source private.ImageSource
+ sys types.SystemContext
+ // this mutex synchronizes the counters below
+ mu sync.Mutex
+ cacheHits int64
+ cacheMisses int64
+ cacheErrors int64
+}
+
+func (b *BlobCache) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ src, err := b.reference.NewImageSource(ctx, sys)
+ if err != nil {
+ return nil, fmt.Errorf("error creating new image source %q: %w", transports.ImageName(b.reference), err)
+ }
+ logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(b.reference), b.directory, b.compress)
+ var sysCopy types.SystemContext
+ if sys != nil {
+ // sys may be nil per the ImageReference.NewImageSource contract; don't dereference it.
+ sysCopy = *sys
+ }
+ s := &blobCacheSource{reference: b, source: imagesource.FromPublic(src), sys: sysCopy}
+ s.Compat = impl.AddCompat(s)
+ return s, nil
+}
+
+func (s *blobCacheSource) Reference() types.ImageReference {
+ return s.reference
+}
+
+func (s *blobCacheSource) Close() error {
+ logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors)
+ return s.source.Close()
+}
+
+func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ filename := s.reference.blobPath(*instanceDigest, false)
+ manifestBytes, err := os.ReadFile(filename)
+ if err == nil {
+ s.mu.Lock()
+ s.cacheHits++
+ s.mu.Unlock()
+ return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil
+ }
+ if !os.IsNotExist(err) {
+ s.mu.Lock()
+ s.cacheErrors++
+ s.mu.Unlock()
+ return nil, "", fmt.Errorf("checking for manifest file: %w", err)
+ }
+ }
+ s.mu.Lock()
+ s.cacheMisses++
+ s.mu.Unlock()
+ return s.source.GetManifest(ctx, instanceDigest)
+}
+
+func (s *blobCacheSource) HasThreadSafeGetBlob() bool {
+ return s.source.HasThreadSafeGetBlob()
+}
+
+func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ blobPath, size, _, err := s.reference.findBlob(blobinfo)
+ if err != nil {
+ return nil, -1, err
+ }
+ if blobPath != "" {
+ f, err := os.Open(blobPath)
+ if err == nil {
+ s.mu.Lock()
+ s.cacheHits++
+ s.mu.Unlock()
+ return f, size, nil
+ }
+ if !os.IsNotExist(err) {
+ s.mu.Lock()
+ s.cacheErrors++
+ s.mu.Unlock()
+ return nil, -1, fmt.Errorf("checking for cache: %w", err)
+ }
+ }
+ s.mu.Lock()
+ s.cacheMisses++
+ s.mu.Unlock()
+ rc, size, err := s.source.GetBlob(ctx, blobinfo, cache)
+ if err != nil {
+ return rc, size, fmt.Errorf("error reading blob from source image %q: %w", transports.ImageName(s.reference), err)
+ }
+ return rc, size, nil
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *blobCacheSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ return s.source.GetSignaturesWithFormat(ctx, instanceDigest)
+}
+
+func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ signatures, err := s.source.GetSignaturesWithFormat(ctx, instanceDigest)
+ if err != nil {
+ return nil, fmt.Errorf("error checking if image %q has signatures: %w", transports.ImageName(s.reference), err)
+ }
+ canReplaceBlobs := len(signatures) == 0
+
+ infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest)
+ if err != nil {
+ return nil, fmt.Errorf("error getting layer infos for copying image %q through cache: %w", transports.ImageName(s.reference), err)
+ }
+ if infos == nil {
+ img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest))
+ if err != nil {
+ return nil, fmt.Errorf("error opening image to get layer infos for copying image %q through cache: %w", transports.ImageName(s.reference), err)
+ }
+ infos = img.LayerInfos()
+ }
+
+ if canReplaceBlobs && s.reference.compress != types.PreserveOriginal {
+ replacedInfos := make([]types.BlobInfo, 0, len(infos))
+ for _, info := range infos {
+ var replaceDigest []byte
+ var err error
+ blobFile := s.reference.blobPath(info.Digest, false)
+ alternate := ""
+ switch s.reference.compress {
+ case types.Compress:
+ alternate = blobFile + compressedNote
+ replaceDigest, err = os.ReadFile(alternate)
+ case types.Decompress:
+ alternate = blobFile + decompressedNote
+ replaceDigest, err = os.ReadFile(alternate)
+ }
+ if err == nil && digest.Digest(replaceDigest).Validate() == nil {
+ alternate = s.reference.blobPath(digest.Digest(replaceDigest), false)
+ fileInfo, err := os.Stat(alternate)
+ if err == nil {
+ switch info.MediaType {
+ case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip:
+ switch s.reference.compress {
+ case types.Compress:
+ info.MediaType = v1.MediaTypeImageLayerGzip
+ info.CompressionAlgorithm = &compression.Gzip
+ case types.Decompress:
+ info.MediaType = v1.MediaTypeImageLayer
+ info.CompressionAlgorithm = nil
+ }
+ case manifest.DockerV2SchemaLayerMediaTypeUncompressed, manifest.DockerV2Schema2LayerMediaType:
+ switch s.reference.compress {
+ case types.Compress:
+ info.MediaType = manifest.DockerV2Schema2LayerMediaType
+ info.CompressionAlgorithm = &compression.Gzip
+ case types.Decompress:
+ // nope, not going to suggest anything, it's not allowed by the spec
+ replacedInfos = append(replacedInfos, info)
+ continue
+ }
+ }
+ logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String())
+ info.CompressionOperation = s.reference.compress
+ info.Digest = digest.Digest(replaceDigest)
+ info.Size = fileInfo.Size()
+ logrus.Debugf("info = %#v", info)
+ }
+ }
+ replacedInfos = append(replacedInfos, info)
+ }
+ infos = replacedInfos
+ }
+
+ return infos, nil
+}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (s *blobCacheSource) SupportsGetBlobAt() bool {
+ return s.source.SupportsGetBlobAt()
+}
+
+// streamChunksFromFile generates the channels returned by GetBlobAt for chunks of seekable file
+func streamChunksFromFile(streams chan io.ReadCloser, errs chan error, file io.ReadSeekCloser,
+ chunks []private.ImageSourceChunk) {
+ defer close(streams)
+ defer close(errs)
+ defer file.Close()
+
+ for _, c := range chunks {
+ // Always seek to the desired offset; that way we don’t need to care about the consumer
+ // not reading all of the chunk, or about the position going backwards.
+ if _, err := file.Seek(int64(c.Offset), io.SeekStart); err != nil {
+ errs <- err
+ break
+ }
+ s := signalCloseReader{
+ closed: make(chan interface{}),
+ stream: io.LimitReader(file, int64(c.Length)),
+ }
+ streams <- s
+
+ // Wait until the stream is closed before going to the next chunk
+ <-s.closed
+ }
+}
+
+type signalCloseReader struct {
+ closed chan interface{}
+ stream io.Reader
+}
+
+func (s signalCloseReader) Read(p []byte) (int, error) {
+ return s.stream.Read(p)
+}
+
+func (s signalCloseReader) Close() error {
+ close(s.closed)
+ return nil
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *blobCacheSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ blobPath, _, _, err := s.reference.findBlob(info)
+ if err != nil {
+ return nil, nil, err
+ }
+ if blobPath != "" {
+ f, err := os.Open(blobPath)
+ if err == nil {
+ s.mu.Lock()
+ s.cacheHits++
+ s.mu.Unlock()
+ streams := make(chan io.ReadCloser)
+ errs := make(chan error)
+ go streamChunksFromFile(streams, errs, f, chunks)
+ return streams, errs, nil
+ }
+ if !os.IsNotExist(err) {
+ s.mu.Lock()
+ s.cacheErrors++
+ s.mu.Unlock()
+ return nil, nil, fmt.Errorf("checking for cache: %w", err)
+ }
+ }
+ s.mu.Lock()
+ s.cacheMisses++
+ s.mu.Unlock()
+ streams, errs, err := s.source.GetBlobAt(ctx, info, chunks)
+ if err != nil {
+ return streams, errs, fmt.Errorf("error reading blob chunks from source image %q: %w", transports.ImageName(s.reference), err)
+ }
+ return streams, errs, nil
+}
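
Consuming the two channels returned by GetBlobAt safely requires a select loop: streamChunksFromFile above may block sending on either channel, and each signalCloseReader must be fully read and then closed before the producer moves on to the next chunk. A sketch of such a consumer (readChunks and its parameters are hypothetical; assumes the stdlib context and io imports alongside the vendored private/types packages):

func readChunks(ctx context.Context, src private.ImageSource, info types.BlobInfo,
	chunks []private.ImageSourceChunk) ([][]byte, error) {
	streams, errs, err := src.GetBlobAt(ctx, info, chunks)
	if err != nil {
		return nil, err
	}
	var out [][]byte
	for streams != nil || errs != nil {
		select {
		case rc, ok := <-streams:
			if !ok {
				streams = nil // channel closed; stop selecting on it
				continue
			}
			data, err := io.ReadAll(rc) // consume fully, in order
			rc.Close()                  // lets streamChunksFromFile advance
			if err != nil {
				return nil, err
			}
			out = append(out, data)
		case e, ok := <-errs:
			if !ok {
				errs = nil
				continue
			}
			return nil, e
		}
	}
	return out, nil
}
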
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
index 34c90dd77..ce688d117 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/storage/pkg/chunked/compressor"
"github.com/klauspost/pgzip"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/ulikunitz/xz"
)
@@ -151,13 +150,13 @@ func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
decompressor, stream, err := DetectCompression(stream)
if err != nil {
- return nil, false, errors.Wrapf(err, "detecting compression")
+ return nil, false, fmt.Errorf("detecting compression: %w", err)
}
var res io.ReadCloser
if decompressor != nil {
res, err = decompressor(stream)
if err != nil {
- return nil, false, errors.Wrapf(err, "initializing decompression")
+ return nil, false, fmt.Errorf("initializing decompression: %w", err)
}
} else {
res = io.NopCloser(stream)
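
This hunk, like the config, shortnames, and sysregistriesv2 hunks that follow, replaces github.com/pkg/errors with stdlib error wrapping. The %w verb preserves the error chain, so sentinel checks that previously went through errors.Cause still work via errors.Is. A runnable sketch:

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	wrapped := fmt.Errorf("listing credentials: %w", exec.ErrNotFound)
	// errors.Is walks the %w chain, replacing the old
	// errors.Cause(err) == exec.ErrNotFound comparison.
	fmt.Println(errors.Is(wrapped, exec.ErrNotFound)) // true
}
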
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index d0bdd08e9..9623546d8 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -3,6 +3,7 @@ package config
import (
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
"os"
"os/exec"
@@ -18,7 +19,6 @@ import (
helperclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker-credential-helpers/credentials"
"github.com/hashicorp/go-multierror"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -111,7 +111,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
}
func unsupportedNamespaceErr(helper string) error {
- return errors.Errorf("namespaced key is not supported for credential helper %s", helper)
+ return fmt.Errorf("namespaced key is not supported for credential helper %s", helper)
}
// SetAuthentication stores the username and password in the credential helper or file
@@ -149,7 +149,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// readJSONFile returns an empty map in case the path doesn't exist.
auths, err := readJSONFile(path.path, path.legacyFormat)
if err != nil {
- return nil, errors.Wrapf(err, "reading JSON file %q", path.path)
+ return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err)
}
// Credential helpers in the auth file have a
// direct mapping to a registry, so we can just
@@ -170,16 +170,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
creds, err := listAuthsFromCredHelper(helper)
if err != nil {
logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
- }
- switch errors.Cause(err) {
- case nil:
- for registry := range creds {
- addKey(registry)
+ if errors.Is(err, exec.ErrNotFound) {
+ creds = nil // It's okay if the helper doesn't exist.
+ } else {
+ return nil, err
}
- case exec.ErrNotFound:
- // It's okay if the helper doesn't exist.
- default:
- return nil, err
+ }
+ for registry := range creds {
+ addKey(registry)
}
}
}
@@ -358,7 +356,7 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string)
return "", "", err
}
if auth.IdentityToken != "" {
- return "", "", errors.Wrap(ErrNotSupported, "non-empty identity token found and this API doesn't support it")
+ return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported)
}
return auth.Username, auth.Password, nil
}
@@ -397,7 +395,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
return
}
}
- multiErr = multierror.Append(multiErr, errors.Wrapf(err, "removing credentials for %s from credential helper %s", key, helper))
+ multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err))
}
for _, helper := range helpers {
@@ -465,19 +463,19 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
default:
var creds map[string]string
creds, err = listAuthsFromCredHelper(helper)
- switch errors.Cause(err) {
- case nil:
- for registry := range creds {
- err = deleteAuthFromCredHelper(helper, registry)
- if err != nil {
- break
- }
+ if err != nil {
+ if errors.Is(err, exec.ErrNotFound) {
+ // It's okay if the helper doesn't exist.
+ continue
+ } else {
+ break
+ }
+ }
+ for registry := range creds {
+ err = deleteAuthFromCredHelper(helper, registry)
+ if err != nil {
+ break
}
- case exec.ErrNotFound:
- // It's okay if the helper doesn't exist.
- continue
- default:
- // fall through
}
}
if err != nil {
@@ -530,7 +528,7 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e
// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
// or made a typo while setting the environment variable,
// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
- return "", false, errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
+ return "", false, fmt.Errorf("%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.: %w", runtimeDir, err)
} // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
return filepath.Join(runtimeDir, xdgRuntimeDirPath), false, nil
}
@@ -554,13 +552,13 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
if legacyFormat {
if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
- return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path)
+ return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path, err)
}
return auths, nil
}
if err = json.Unmarshal(raw, &auths); err != nil {
- return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path)
+ return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path, err)
}
if auths.AuthConfigs == nil {
@@ -592,21 +590,21 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
auths, err := readJSONFile(path, false)
if err != nil {
- return "", errors.Wrapf(err, "reading JSON file %q", path)
+ return "", fmt.Errorf("reading JSON file %q: %w", path, err)
}
updated, err := editor(&auths)
if err != nil {
- return "", errors.Wrapf(err, "updating %q", path)
+ return "", fmt.Errorf("updating %q: %w", path, err)
}
if updated {
newData, err := json.MarshalIndent(auths, "", "\t")
if err != nil {
- return "", errors.Wrapf(err, "marshaling JSON %q", path)
+ return "", fmt.Errorf("marshaling JSON %q: %w", path, err)
}
if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil {
- return "", errors.Wrapf(err, "writing to file %q", path)
+ return "", fmt.Errorf("writing to file %q: %w", path, err)
}
}
@@ -660,7 +658,7 @@ func deleteAuthFromCredHelper(credHelper, registry string) error {
func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
auths, err := readJSONFile(path, legacyFormat)
if err != nil {
- return types.DockerAuthConfig{}, errors.Wrapf(err, "reading JSON file %q", path)
+ return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path, err)
}
// First try cred helpers. They should always be normalized.
@@ -781,7 +779,7 @@ func normalizeRegistry(registry string) string {
// allowed and returns an indicator if the key is namespaced.
func validateKey(key string) (bool, error) {
if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") {
- return false, errors.Errorf("key %s contains http[s]:// prefix", key)
+ return false, fmt.Errorf("key %s contains http[s]:// prefix", key)
}
// Ideally this should only accept explicitly valid keys, compare
diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
index 46c10ff63..3e16d8ca2 100644
--- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
+++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
@@ -1,6 +1,7 @@
package shortnames
import (
+ "errors"
"fmt"
"os"
"strings"
@@ -10,7 +11,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/manifoldco/promptui"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"golang.org/x/term"
)
@@ -33,12 +33,12 @@ func IsShortName(input string) bool {
func parseUnnormalizedShortName(input string) (bool, reference.Named, error) {
ref, err := reference.Parse(input)
if err != nil {
- return false, nil, errors.Wrapf(err, "cannot parse input: %q", input)
+ return false, nil, fmt.Errorf("cannot parse input: %q: %w", input, err)
}
named, ok := ref.(reference.Named)
if !ok {
- return true, nil, errors.Errorf("%q is not a named reference", input)
+ return true, nil, fmt.Errorf("%q is not a named reference", input)
}
registry := reference.Domain(named)
@@ -47,7 +47,7 @@ func parseUnnormalizedShortName(input string) (bool, reference.Named, error) {
// normalized (e.g., docker.io/alpine to docker.io/library/alpine).
named, err = reference.ParseNormalizedNamed(input)
if err != nil {
- return false, nil, errors.Wrapf(err, "cannot normalize input: %q", input)
+ return false, nil, fmt.Errorf("cannot normalize input: %q: %w", input, err)
}
return false, named, nil
}
@@ -87,7 +87,7 @@ func Add(ctx *types.SystemContext, name string, value reference.Named) error {
return err
}
if !isShort {
- return errors.Errorf("%q is not a short name", name)
+ return fmt.Errorf("%q is not a short name", name)
}
return sysregistriesv2.AddShortNameAlias(ctx, name, value.String())
}
@@ -102,7 +102,7 @@ func Remove(ctx *types.SystemContext, name string) error {
return err
}
if !isShort {
- return errors.Errorf("%q is not a short name", name)
+ return fmt.Errorf("%q is not a short name", name)
}
return sysregistriesv2.RemoveShortNameAlias(ctx, name)
}
@@ -172,7 +172,7 @@ func (r *Resolved) Description() string {
func (r *Resolved) FormatPullErrors(pullErrors []error) error {
if len(pullErrors) >= 0 && len(pullErrors) != len(r.PullCandidates) {
pullErrors = append(pullErrors,
- errors.Errorf("internal error: expected %d instead of %d errors for %d pull candidates",
+ fmt.Errorf("internal error: expected %d instead of %d errors for %d pull candidates",
len(r.PullCandidates), len(pullErrors), len(r.PullCandidates)))
}
@@ -216,7 +216,7 @@ func (c *PullCandidate) Record() error {
value := reference.TrimNamed(c.Value)
if err := Add(c.resolved.systemContext, name.String(), value); err != nil {
- return errors.Wrapf(err, "recording short-name alias (%q=%q)", c.resolved.userInput, c.Value)
+ return fmt.Errorf("recording short-name alias (%q=%q): %w", c.resolved.userInput, c.Value, err)
}
return nil
}
@@ -262,7 +262,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
case types.ShortNameModeDisabled, types.ShortNameModePermissive, types.ShortNameModeEnforcing:
// We're good.
default:
- return nil, errors.Errorf("unsupported short-name mode (%v)", mode)
+ return nil, fmt.Errorf("unsupported short-name mode (%v)", mode)
}
isShort, shortRef, err := parseUnnormalizedShortName(name)
@@ -279,7 +279,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub {
named, err := reference.ParseNormalizedNamed(name)
if err != nil {
- return nil, errors.Wrapf(err, "cannot normalize input: %q", name)
+ return nil, fmt.Errorf("cannot normalize input: %q: %w", name, err)
}
resolved.addCandidate(named)
resolved.rationale = rationaleEnforcedDockerHub
@@ -328,16 +328,16 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
// Error out if there's no matching alias and no search registries.
if len(unqualifiedSearchRegistries) == 0 {
if usrConfig != "" {
- return nil, errors.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig)
+ return nil, fmt.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig)
}
- return nil, errors.Errorf("short-name %q did not resolve to an alias and no containers-registries.conf(5) was found", name)
+ return nil, fmt.Errorf("short-name %q did not resolve to an alias and no containers-registries.conf(5) was found", name)
}
resolved.originDescription = usrConfig
for _, reg := range unqualifiedSearchRegistries {
named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
if err != nil {
- return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg)
+ return nil, fmt.Errorf("creating reference with unqualified-search registry %q: %w", reg, err)
}
resolved.addCandidate(named)
}
@@ -364,7 +364,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
return nil, errors.New("short-name resolution enforced but cannot prompt without a TTY")
default:
// We should not end up here.
- return nil, errors.Errorf("unexpected short-name mode (%v) during resolution", mode)
+ return nil, fmt.Errorf("unexpected short-name mode (%v) during resolution", mode)
}
}
@@ -387,7 +387,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
named, err := reference.ParseNormalizedNamed(selection)
if err != nil {
- return nil, errors.Wrapf(err, "selection %q is not a valid reference", selection)
+ return nil, fmt.Errorf("selection %q is not a valid reference: %w", selection, err)
}
resolved.PullCandidates = nil
@@ -428,7 +428,7 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e
for _, reg := range registries {
named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
if err != nil {
- return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg)
+ return nil, fmt.Errorf("creating reference with unqualified-search registry %q: %w", reg, err)
}
named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
candidates = append(candidates, named)
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
index 6909ea0a6..12939b24d 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
@@ -1,6 +1,7 @@
package sysregistriesv2
import (
+ "fmt"
"os"
"path/filepath"
"reflect"
@@ -12,7 +13,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/lockfile"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -166,7 +166,7 @@ func editShortNameAlias(ctx *types.SystemContext, name string, value *string) er
} else {
// If the name does not exist, throw an error.
if _, exists := conf.Aliases[name]; !exists {
- return errors.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath)
+ return fmt.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath)
}
delete(conf.Aliases, name)
@@ -210,25 +210,25 @@ func RemoveShortNameAlias(ctx *types.SystemContext, name string) error {
func parseShortNameValue(alias string) (reference.Named, error) {
ref, err := reference.Parse(alias)
if err != nil {
- return nil, errors.Wrapf(err, "parsing alias %q", alias)
+ return nil, fmt.Errorf("parsing alias %q: %w", alias, err)
}
if _, ok := ref.(reference.Digested); ok {
- return nil, errors.Errorf("invalid alias %q: must not contain digest", alias)
+ return nil, fmt.Errorf("invalid alias %q: must not contain digest", alias)
}
if _, ok := ref.(reference.Tagged); ok {
- return nil, errors.Errorf("invalid alias %q: must not contain tag", alias)
+ return nil, fmt.Errorf("invalid alias %q: must not contain tag", alias)
}
named, ok := ref.(reference.Named)
if !ok {
- return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias)
+ return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
}
registry := reference.Domain(named)
if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
- return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias)
+ return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
}
// A final parse to make sure that docker.io references are correctly
@@ -242,25 +242,25 @@ func parseShortNameValue(alias string) (reference.Named, error) {
func validateShortName(name string) error {
repo, err := reference.Parse(name)
if err != nil {
- return errors.Wrapf(err, "cannot parse short name: %q", name)
+ return fmt.Errorf("cannot parse short name: %q: %w", name, err)
}
if _, ok := repo.(reference.Digested); ok {
- return errors.Errorf("invalid short name %q: must not contain digest", name)
+ return fmt.Errorf("invalid short name %q: must not contain digest", name)
}
if _, ok := repo.(reference.Tagged); ok {
- return errors.Errorf("invalid short name %q: must not contain tag", name)
+ return fmt.Errorf("invalid short name %q: must not contain tag", name)
}
named, ok := repo.(reference.Named)
if !ok {
- return errors.Errorf("invalid short name %q: no name", name)
+ return fmt.Errorf("invalid short name %q: no name", name)
}
registry := reference.Domain(named)
if strings.ContainsAny(registry, ".:") || registry == "localhost" {
- return errors.Errorf("invalid short name %q: must not contain registry", name)
+ return fmt.Errorf("invalid short name %q: must not contain registry", name)
}
return nil
}
@@ -298,7 +298,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl
if len(errs) > 0 {
err := errs[0]
for i := 1; i < len(errs); i++ {
- err = errors.Wrapf(err, "%v\n", errs[i])
+ err = fmt.Errorf("%v\n: %w", errs[i], err)
}
return nil, err
}
@@ -319,7 +319,7 @@ func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAli
meta, err := toml.DecodeFile(confPath, &conf)
if err != nil && !os.IsNotExist(err) {
// It's okay if the config doesn't exist. Other errors are not.
- return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath)
+ return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err)
}
if keys := meta.Undecoded(); len(keys) > 0 {
logrus.Debugf("Failed to decode keys %q from %q", keys, confPath)
@@ -329,7 +329,7 @@ func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAli
// file could still be corrupted by another process or user.
cache, err := newShortNameAliasCache(confPath, &conf)
if err != nil {
- return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath)
+ return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err)
}
return &conf, cache, nil
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
index 002b28d75..41204dd9a 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
@@ -15,7 +15,6 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -103,7 +102,7 @@ func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (referen
newNamedRef = e.Location + refString[prefixLen:]
newParsedRef, err := reference.ParseNamed(newNamedRef)
if err != nil {
- return nil, errors.Wrapf(err, "rewriting reference")
+ return nil, fmt.Errorf("rewriting reference: %w", err)
}
return newParsedRef, nil
@@ -666,7 +665,7 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) {
if err != nil && !os.IsNotExist(err) {
// Ignore IsNotExist errors: most systems won't have a registries.conf.d
// directory.
- return nil, errors.Wrapf(err, "reading registries.conf.d")
+ return nil, fmt.Errorf("reading registries.conf.d: %w", err)
}
}
@@ -708,7 +707,7 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC
return nil, err // Should never happen
}
} else {
- return nil, errors.Wrapf(err, "loading registries configuration %q", wrapper.configPath)
+ return nil, fmt.Errorf("loading registries configuration %q: %w", wrapper.configPath, err)
}
}
@@ -721,7 +720,7 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC
// Enforce v2 format for drop-in-configs.
dropIn, err := loadConfigFile(path, true)
if err != nil {
- return nil, errors.Wrapf(err, "loading drop-in registries configuration %q", path)
+ return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err)
}
config.updateWithConfigurationFrom(dropIn)
}
@@ -782,7 +781,7 @@ func parseShortNameMode(mode string) (types.ShortNameMode, error) {
case "permissive":
return types.ShortNameModePermissive, nil
default:
- return types.ShortNameModeInvalid, errors.Errorf("invalid short-name mode: %q", mode)
+ return types.ShortNameModeInvalid, fmt.Errorf("invalid short-name mode: %q", mode)
}
}
@@ -975,7 +974,7 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
// Parse and validate short-name aliases.
cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf)
if err != nil {
- return nil, errors.Wrap(err, "validating short-name aliases")
+ return nil, fmt.Errorf("validating short-name aliases: %w", err)
}
res.aliasCache = cache
// Clear conf.partialV2.shortNameAliasConf to make it available for garbage collection and
diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
index c766417d0..9599aa3c9 100644
--- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
+++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
@@ -2,6 +2,7 @@ package tlsclientconfig
import (
"crypto/tls"
+ "fmt"
"net"
"net/http"
"os"
@@ -11,7 +12,6 @@ import (
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -49,7 +49,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
if tlsc.RootCAs == nil {
systemPool, err := tlsconfig.SystemCertPool()
if err != nil {
- return errors.Wrap(err, "unable to get system cert pool")
+ return fmt.Errorf("unable to get system cert pool: %w", err)
}
tlsc.RootCAs = systemPool
}
@@ -60,7 +60,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
keyName := certName[:len(certName)-5] + ".key"
logrus.Debugf(" cert: %s", fullPath)
if !hasFile(fs, keyName) {
- return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
+ return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
}
cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
if err != nil {
@@ -73,7 +73,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
certName := keyName[:len(keyName)-4] + ".cert"
logrus.Debugf(" key: %s", fullPath)
if !hasFile(fs, certName) {
- return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
+ return fmt.Errorf("missing client certificate %s for key %s", certName, keyName)
}
}
}
diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go
index ccf125966..b645f80dd 100644
--- a/vendor/github.com/containers/image/v5/sif/src.go
+++ b/vendor/github.com/containers/image/v5/sif/src.go
@@ -9,6 +9,9 @@ import (
"io"
"os"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
@@ -19,6 +22,12 @@ import (
)
type sifImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
ref sifReference
workDir string
layerDigest digest.Digest
@@ -55,7 +64,7 @@ func getBlobInfo(path string) (digest.Digest, int64, error) {
// newImageSource returns an ImageSource for reading from an existing directory.
// newImageSource extracts SIF objects and saves them in a temp directory.
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (types.ImageSource, error) {
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (private.ImageSource, error) {
sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY))
if err != nil {
return nil, fmt.Errorf("loading SIF file: %w", err)
@@ -136,7 +145,12 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
}
succeeded = true
- return &sifImageSource{
+ s := &sifImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
ref: ref,
workDir: workDir,
layerDigest: layerDigest,
@@ -145,7 +159,9 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
config: configBytes,
configDigest: configDigest,
manifest: manifestBytes,
- }, nil
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
}
// Reference returns the reference used to set up this source.
@@ -158,11 +174,6 @@ func (s *sifImageSource) Close() error {
return os.RemoveAll(s.workDir)
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *sifImageSource) HasThreadSafeGetBlob() bool {
- return true
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -191,26 +202,3 @@ func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest
}
return s.manifest, imgspecv1.MediaTypeImageManifest, nil
}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-func (s *sifImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- return nil, errors.New("manifest lists are not supported by the sif transport")
- }
- return nil, nil
-}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *sifImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go
index 8e9ce0dd2..b09502dfe 100644
--- a/vendor/github.com/containers/image/v5/signature/docker.go
+++ b/vendor/github.com/containers/image/v5/signature/docker.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
"github.com/opencontainers/go-digest"
)
@@ -56,18 +57,18 @@ func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byt
sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
if keyIdentity != expectedKeyIdentity {
- return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity))
}
return nil
},
validateSignedDockerReference: func(signedDockerReference string) error {
signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
if err != nil {
- return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference))
}
if signedRef.String() != expectedRef.String() {
- return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s",
- signedDockerReference, expectedDockerReference)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %s does not match %s",
+ signedDockerReference, expectedDockerReference))
}
return nil
},
@@ -77,7 +78,7 @@ func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byt
return err
}
if !matches {
- return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest))
}
return nil
},
diff --git a/vendor/github.com/containers/image/v5/signature/internal/errors.go b/vendor/github.com/containers/image/v5/signature/internal/errors.go
new file mode 100644
index 000000000..7872f0f43
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/errors.go
@@ -0,0 +1,15 @@
+package internal
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+// This is publicly visible as signature.InvalidSignatureError
+type InvalidSignatureError struct {
+ msg string
+}
+
+func (err InvalidSignatureError) Error() string {
+ return err.msg
+}
+
+func NewInvalidSignatureError(msg string) InvalidSignatureError {
+ return InvalidSignatureError{msg: msg}
+}
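
Because the type is exported here (and re-exported as signature.InvalidSignatureError), callers can separate a malformed signature from an infrastructure failure with errors.As. A sketch of that check (isSignatureInvalid is a hypothetical, in-package helper; assumes the stdlib errors import):

// isSignatureInvalid reports whether err indicates a bad signature rather
// than some other (possibly retryable) failure.
func isSignatureInvalid(err error) bool {
	var invalidSig InvalidSignatureError
	return errors.As(err, &invalidSig)
}
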
diff --git a/vendor/github.com/containers/image/v5/signature/json.go b/vendor/github.com/containers/image/v5/signature/internal/json.go
index 9e592863d..0f39fe0ad 100644
--- a/vendor/github.com/containers/image/v5/signature/json.go
+++ b/vendor/github.com/containers/image/v5/signature/internal/json.go
@@ -1,4 +1,4 @@
-package signature
+package internal
import (
"bytes"
@@ -7,34 +7,34 @@ import (
"io"
)
-// jsonFormatError is returned when JSON does not match expected format.
-type jsonFormatError string
+// JSONFormatError is returned when JSON does not match expected format.
+type JSONFormatError string
-func (err jsonFormatError) Error() string {
+func (err JSONFormatError) Error() string {
return string(err)
}
-// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect
+// ParanoidUnmarshalJSONObject unmarshals data as a JSON object, but fails on the slightest unexpected aspect
// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
//
// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
// we could use reflection to automate this. Later?
-func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
+func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
seenKeys := map[string]struct{}{}
dec := json.NewDecoder(bytes.NewReader(data))
t, err := dec.Token()
if err != nil {
- return jsonFormatError(err.Error())
+ return JSONFormatError(err.Error())
}
if t != json.Delim('{') {
- return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
+ return JSONFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
}
for {
t, err := dec.Token()
if err != nil {
- return jsonFormatError(err.Error())
+ return JSONFormatError(err.Error())
}
if t == json.Delim('}') {
break
@@ -43,34 +43,34 @@ func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa
key, ok := t.(string)
if !ok {
// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
- return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
+ return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
}
if _, ok := seenKeys[key]; ok {
- return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
+ return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
}
seenKeys[key] = struct{}{}
valuePtr := fieldResolver(key)
if valuePtr == nil {
- return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
+ return JSONFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
}
// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
if err := dec.Decode(valuePtr); err != nil {
- return jsonFormatError(err.Error())
+ return JSONFormatError(err.Error())
}
}
if _, err := dec.Token(); err != io.EOF {
- return jsonFormatError("Unexpected data after JSON object")
+ return JSONFormatError("Unexpected data after JSON object")
}
return nil
}
-// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect
+// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but fails on the slightest unexpected aspect
// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
// must be present exactly once, and no other fields are accepted.
-func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
+func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
seenKeys := map[string]struct{}{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if err := ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if valuePtr, ok := exactFields[key]; ok {
seenKeys[key] = struct{}{}
return valuePtr
@@ -81,7 +81,7 @@ func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]
}
for key := range exactFields {
if _, ok := seenKeys[key]; !ok {
- return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
+ return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
}
}
return nil
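
The practical difference from encoding/json.Unmarshal is strictness: unknown keys, duplicate keys, and missing required keys are all hard errors rather than being silently tolerated. A sketch, assuming it were written as a test in this package (with the stdlib encoding/json and fmt imports):

func ExampleParanoidUnmarshalJSONObjectExactFields() {
	var critical, optional json.RawMessage
	fields := map[string]interface{}{
		"critical": &critical,
		"optional": &optional,
	}
	fmt.Println(ParanoidUnmarshalJSONObjectExactFields(
		[]byte(`{"critical":{},"optional":null}`), fields)) // <nil>
	fmt.Println(ParanoidUnmarshalJSONObjectExactFields(
		[]byte(`{"critical":{},"optional":null,"extra":1}`), fields)) // Unknown key "extra"
}
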
diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
new file mode 100644
index 000000000..bb5e9139d
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
@@ -0,0 +1,201 @@
+package internal
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/containers/image/v5/version"
+ digest "github.com/opencontainers/go-digest"
+ sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+)
+
+const (
+ sigstoreSignatureType = "cosign container image signature"
+ sigstoreHarcodedHashAlgorithm = crypto.SHA256
+)
+
+// UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature)
+type UntrustedSigstorePayload struct {
+ UntrustedDockerManifestDigest digest.Digest
+ UntrustedDockerReference string // FIXME: more precise type?
+ UntrustedCreatorID *string
+ // This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
+ // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+ // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+ // we would add another field, UntrustedTimestampNS int64.
+ UntrustedTimestamp *int64
+}
+
+// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with
+// the specified primary contents and appropriate metadata.
+func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerReference string) UntrustedSigstorePayload {
+ // Use intermediate variables for these values so that we can take their addresses.
+ // Golang guarantees that they will have a new address on every execution.
+ creatorID := "containers/image " + version.Version
+ timestamp := time.Now().Unix()
+ return UntrustedSigstorePayload{
+ UntrustedDockerManifestDigest: dockerManifestDigest,
+ UntrustedDockerReference: dockerReference,
+ UntrustedCreatorID: &creatorID,
+ UntrustedTimestamp: &timestamp,
+ }
+}
+
+// Compile-time check that UntrustedSigstorePayload implements json.Marshaler
+var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) {
+ if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
+ return nil, errors.New("Unexpected empty signature content")
+ }
+ critical := map[string]interface{}{
+ "type": sigstoreSignatureType,
+ "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
+ "identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
+ }
+ optional := map[string]interface{}{}
+ if s.UntrustedCreatorID != nil {
+ optional["creator"] = *s.UntrustedCreatorID
+ }
+ if s.UntrustedTimestamp != nil {
+ optional["timestamp"] = *s.UntrustedTimestamp
+ }
+ signature := map[string]interface{}{
+ "critical": critical,
+ "optional": optional,
+ }
+ return json.Marshal(signature)
+}
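
For orientation, the envelope the method above emits has the following shape (a sketch; the values are illustrative, not taken from a real signature):

// {
//   "critical": {
//     "type": "cosign container image signature",
//     "image": {"docker-manifest-digest": "sha256:<hex digest>"},
//     "identity": {"docker-reference": "registry.example.com/repo:tag"}
//   },
//   "optional": {
//     "creator": "containers/image <version>",
//     "timestamp": 1650000000
//   }
// }
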
+
+// Compile-time check that UntrustedSigstorePayload implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error {
+ err := s.strictUnmarshalJSON(data)
+ if err != nil {
+ if formatErr, ok := err.(JSONFormatError); ok {
+ err = NewInvalidSignatureError(formatErr.Error())
+ }
+ }
+ return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
+func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
+ var critical, optional json.RawMessage
+ if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "critical": &critical,
+ "optional": &optional,
+ }); err != nil {
+ return err
+ }
+
+ var creatorID string
+ var timestamp float64
+ var gotCreatorID, gotTimestamp = false, false
+ // /usr/bin/cosign generates "optional": null if there are no user-specified annotations.
+ if !bytes.Equal(optional, []byte("null")) {
+ if err := ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
+ switch key {
+ case "creator":
+ gotCreatorID = true
+ return &creatorID
+ case "timestamp":
+ gotTimestamp = true
+ return &timestamp
+ default:
+ var ignore interface{}
+ return &ignore
+ }
+ }); err != nil {
+ return err
+ }
+ }
+ if gotCreatorID {
+ s.UntrustedCreatorID = &creatorID
+ }
+ if gotTimestamp {
+ intTimestamp := int64(timestamp)
+ if float64(intTimestamp) != timestamp {
+ return NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
+ }
+ s.UntrustedTimestamp = &intTimestamp
+ }
+
+ var t string
+ var image, identity json.RawMessage
+ if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
+ "type": &t,
+ "image": &image,
+ "identity": &identity,
+ }); err != nil {
+ return err
+ }
+ if t != sigstoreSignatureType {
+ return NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
+ }
+
+ var digestString string
+ if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
+ "docker-manifest-digest": &digestString,
+ }); err != nil {
+ return err
+ }
+ s.UntrustedDockerManifestDigest = digest.Digest(digestString)
+
+ return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+ "docker-reference": &s.UntrustedDockerReference,
+ })
+}
+
+// SigstorePayloadAcceptanceRules specifies how to decide whether an untrusted payload is acceptable.
+// We centralize the actual parsing and data extraction in VerifySigstorePayload; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type SigstorePayloadAcceptanceRules struct {
+ ValidateSignedDockerReference func(string) error
+ ValidateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by publicKey, and that its principal components
+// match expected values, both as specified by rules, and returns it.
+// We return an *UntrustedSigstorePayload, although nothing actually uses it,
+// just to double-check against stupid typos.
+func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) {
+ verifier, err := sigstoreSignature.LoadVerifier(publicKey, sigstoreHarcodedHashAlgorithm)
+ if err != nil {
+ return nil, fmt.Errorf("creating verifier: %w", err)
+ }
+
+ unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
+ if err != nil {
+ return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err))
+ }
+ // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(),
+ // which seems to be not used by anything. So we don’t bother.
+ if err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)); err != nil {
+ return nil, NewInvalidSignatureError(fmt.Sprintf("cryptographic signature verification failed: %v", err))
+ }
+
+ var unmatchedPayload UntrustedSigstorePayload
+ if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil {
+ return nil, NewInvalidSignatureError(err.Error())
+ }
+ if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.UntrustedDockerManifestDigest); err != nil {
+ return nil, err
+ }
+ if err := rules.ValidateSignedDockerReference(unmatchedPayload.UntrustedDockerReference); err != nil {
+ return nil, err
+ }
+ // SigstorePayloadAcceptanceRules have accepted this value.
+ return &unmatchedPayload, nil
+}
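
A sketch of wiring acceptance rules into the verifier above; verifyPayload and the expected values are hypothetical, in-package code (assumes the crypto, fmt, and go-digest imports already used in this file):

func verifyPayload(publicKey crypto.PublicKey, payload []byte, base64Sig string,
	expectedRef string, expectedDigest digest.Digest) error {
	_, err := VerifySigstorePayload(publicKey, payload, base64Sig, SigstorePayloadAcceptanceRules{
		ValidateSignedDockerReference: func(ref string) error {
			if ref != expectedRef {
				return NewInvalidSignatureError(fmt.Sprintf("reference %q does not match %q", ref, expectedRef))
			}
			return nil
		},
		ValidateSignedDockerManifestDigest: func(d digest.Digest) error {
			if d != expectedDigest {
				return NewInvalidSignatureError(fmt.Sprintf("digest %q does not match %q", d, expectedDigest))
			}
			return nil
		},
	})
	return err
}
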
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go
index 249b5a1fe..1d3fe0fdc 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism.go
@@ -65,7 +65,7 @@ func NewGPGSigningMechanism() (SigningMechanism, error) {
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
- return newEphemeralGPGSigningMechanism(blob)
+ return newEphemeralGPGSigningMechanism([][]byte{blob})
}
// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
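
The exported wrapper keeps its single-blob signature, so existing callers are unchanged; only the private implementations switch to [][]byte. A minimal usage sketch (the key-file path is a hypothetical placeholder):

	keyBlob, err := os.ReadFile("/etc/keys/pubring.gpg") // hypothetical path
	if err != nil {
		return err
	}
	mech, keyIdentities, err := signature.NewEphemeralGPGSigningMechanism(keyBlob)
	if err != nil {
		return err
	}
	defer mech.Close()
	_ = keyIdentities // fingerprints of the keys imported from the blob
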
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
index 4c7968417..2b2a7ad86 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
@@ -9,6 +9,7 @@ import (
"fmt"
"os"
+ "github.com/containers/image/v5/signature/internal"
"github.com/proglottis/gpgme"
)
@@ -32,10 +33,10 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith
}
// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
-// recognizes _only_ public keys from the supplied blob, and returns the identities
+// recognizes _only_ public keys from the supplied blobs, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
-func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) {
+func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) {
dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-")
if err != nil {
return nil, nil, err
@@ -54,9 +55,13 @@ func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphras
ctx: ctx,
ephemeralDir: dir,
}
- keyIdentities, err := mech.importKeysFromBytes(blob)
- if err != nil {
- return nil, nil, err
+ keyIdentities := []string{}
+ for _, blob := range blobs {
+ ki, err := mech.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+ keyIdentities = append(keyIdentities, ki...)
}
removeDir = false
@@ -181,13 +186,13 @@ func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []b
return nil, "", err
}
if len(sigs) != 1 {
- return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected GPG signature count %d", len(sigs)))
}
sig := sigs[0]
// This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
// FIXME: Better error reporting eventually
- return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", sig))
}
return signedBuffer.Bytes(), sig.Fingerprint, nil
}
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
index 63cb7788b..5d6c1ac83 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
@@ -13,6 +13,7 @@ import (
"strings"
"time"
+ "github.com/containers/image/v5/signature/internal"
"github.com/containers/storage/pkg/homedir"
// This is a fallback code; the primary recommendation is to use the gpgme mechanism
// implementation, which is out-of-process and more appropriate for handling long-term private key material
@@ -62,14 +63,19 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith
// recognizes _only_ public keys from the supplied blob, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
-func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) {
+func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) {
m := &openpgpSigningMechanism{
keyring: openpgp.EntityList{},
}
- keyIdentities, err := m.importKeysFromBytes(blob)
- if err != nil {
- return nil, nil, err
+ keyIdentities := []string{}
+ for _, blob := range blobs {
+ ki, err := m.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+ keyIdentities = append(keyIdentities, ki...)
}
+
return m, keyIdentities, nil
}
@@ -144,19 +150,19 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
}
if md.SignedBy == nil {
- return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", md.Signature))
}
if md.Signature != nil {
if md.Signature.SigLifetimeSecs != nil {
expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
if time.Now().After(expiry) {
- return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Signature expired on %s", expiry))
}
}
} else if md.SignatureV3 == nil {
// Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
// or sets md.SignatureError.
- return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"}
+ return nil, "", internal.NewInvalidSignatureError("Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set")
}
// Uppercase the fingerprint to be compatible with gpgme
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go
index 2ed0f882b..f8fdce2da 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_config.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_config.go
@@ -15,16 +15,17 @@ package signature
import (
"encoding/json"
+ "errors"
"fmt"
"os"
"path/filepath"
"regexp"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/signature/internal"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
- "github.com/pkg/errors"
)
// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
@@ -81,7 +82,7 @@ func NewPolicyFromFile(fileName string) (*Policy, error) {
}
policy, err := NewPolicyFromBytes(contents)
if err != nil {
- return nil, errors.Wrapf(err, "invalid policy in %q", fileName)
+ return nil, fmt.Errorf("invalid policy in %q: %w", fileName, err)
}
return policy, nil
}
@@ -103,7 +104,7 @@ var _ json.Unmarshaler = (*Policy)(nil)
func (p *Policy) UnmarshalJSON(data []byte) error {
*p = Policy{}
transports := policyTransportsMap{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "default":
return &p.Default
@@ -134,10 +135,10 @@ func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyTransportScopes{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
// transport can be nil
transport := transports.Get(key)
- // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
if _, ok := tmpMap[key]; ok {
return nil
}
@@ -180,8 +181,8 @@ func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyRequirements{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
- // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
+ // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
if _, ok := tmpMap[key]; ok {
return nil
}
@@ -242,6 +243,8 @@ func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) {
res = &prSignedBy{}
case prTypeSignedBaseLayer:
res = &prSignedBaseLayer{}
+ case prTypeSigstoreSigned:
+ res = &prSigstoreSigned{}
default:
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type))
}
@@ -268,7 +271,7 @@ var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
*pr = prInsecureAcceptAnything{}
var tmp prInsecureAcceptAnything
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@@ -298,7 +301,7 @@ var _ json.Unmarshaler = (*prReject)(nil)
func (pr *prReject) UnmarshalJSON(data []byte) error {
*pr = prReject{}
var tmp prReject
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@@ -312,12 +315,22 @@ func (pr *prReject) UnmarshalJSON(data []byte) error {
}
// newPRSignedBy returns a new prSignedBy if parameters are valid.
-func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
if !keyType.IsValid() {
return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
}
- if len(keyPath) > 0 && len(keyData) > 0 {
- return nil, InvalidPolicyFormatError("keyType and keyData cannot be used simultaneously")
+ keySources := 0
+ if keyPath != "" {
+ keySources++
+ }
+ if keyPaths != nil {
+ keySources++
+ }
+ if keyData != nil {
+ keySources++
+ }
+ if keySources != 1 {
+ return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths and keyData must be specified")
}
if signedIdentity == nil {
return nil, InvalidPolicyFormatError("signedIdentity not specified")
@@ -326,6 +339,7 @@ func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIden
prCommon: prCommon{Type: prTypeSignedBy},
KeyType: keyType,
KeyPath: keyPath,
+ KeyPaths: keyPaths,
KeyData: keyData,
SignedIdentity: signedIdentity,
}, nil
@@ -333,7 +347,7 @@ func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIden
// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
- return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
+ return newPRSignedBy(keyType, keyPath, nil, nil, signedIdentity)
}
// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
@@ -341,9 +355,19 @@ func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity Poli
return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
}
+// newPRSignedByKeyPaths is NewPRSignedByKeyPaths, except it returns the private type.
+func newPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ return newPRSignedBy(keyType, "", keyPaths, nil, signedIdentity)
+}
+
+// NewPRSignedByKeyPaths returns a new "signedBy" PolicyRequirement using KeyPaths
+func NewPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedByKeyPaths(keyType, keyPaths, signedIdentity)
+}
+
// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
- return newPRSignedBy(keyType, "", keyData, signedIdentity)
+ return newPRSignedBy(keyType, "", nil, keyData, signedIdentity)
}
// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData
@@ -358,9 +382,9 @@ var _ json.Unmarshaler = (*prSignedBy)(nil)
func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
*pr = prSignedBy{}
var tmp prSignedBy
- var gotKeyPath, gotKeyData = false, false
+ var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false
var signedIdentity json.RawMessage
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
@@ -369,6 +393,9 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
case "keyPath":
gotKeyPath = true
return &tmp.KeyPath
+ case "keyPaths":
+ gotKeyPaths = true
+ return &tmp.KeyPaths
case "keyData":
gotKeyData = true
return &tmp.KeyData
@@ -397,16 +424,16 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
var res *prSignedBy
var err error
switch {
- case gotKeyPath && gotKeyData:
- return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
- case gotKeyPath && !gotKeyData:
+ case gotKeyPath && !gotKeyPaths && !gotKeyData:
res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
- case !gotKeyPath && gotKeyData:
+ case !gotKeyPath && gotKeyPaths && !gotKeyData:
+ res, err = newPRSignedByKeyPaths(tmp.KeyType, tmp.KeyPaths, tmp.SignedIdentity)
+ case !gotKeyPath && !gotKeyPaths && gotKeyData:
res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
- case !gotKeyPath && !gotKeyData:
- return InvalidPolicyFormatError("At least one of keyPath and keyData mus be specified")
- default: // Coverage: This should never happen
- return errors.Errorf("Impossible keyPath/keyData presence combination!?")
+ case !gotKeyPath && !gotKeyPaths && !gotKeyData:
+ return InvalidPolicyFormatError("Exactly one of keyPath, keyPaths and keyData must be specified, none of them present")
+ default:
+ return fmt.Errorf("Exactly one of keyPath, keyPaths and keyData must be specified, more than one present")
}
if err != nil {
return err
@@ -468,7 +495,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
*pr = prSignedBaseLayer{}
var tmp prSignedBaseLayer
var baseLayerIdentity json.RawMessage
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
"baseLayerIdentity": &baseLayerIdentity,
}); err != nil {
@@ -491,6 +518,107 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
return nil
}
+// newPRSigstoreSigned returns a new prSigstoreSigned if parameters are valid.
+func newPRSigstoreSigned(keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) {
+ if len(keyPath) > 0 && len(keyData) > 0 {
+ return nil, InvalidPolicyFormatError("keyType and keyData cannot be used simultaneously")
+ }
+ if signedIdentity == nil {
+ return nil, InvalidPolicyFormatError("signedIdentity not specified")
+ }
+ return &prSigstoreSigned{
+ prCommon: prCommon{Type: prTypeSigstoreSigned},
+ KeyPath: keyPath,
+ KeyData: keyData,
+ SignedIdentity: signedIdentity,
+ }, nil
+}
+
+// newPRSigstoreSignedKeyPath is NewPRSigstoreSignedKeyPath, except it returns the private type.
+func newPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) {
+ return newPRSigstoreSigned(keyPath, nil, signedIdentity)
+}
+
+// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath
+func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSigstoreSignedKeyPath(keyPath, signedIdentity)
+}
+
+// newPRSigstoreSignedKeyData is NewPRSigstoreSignedKeyData, except it returns the private type.
+func newPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) {
+ return newPRSigstoreSigned("", keyData, signedIdentity)
+}
+
+// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using a KeyData
+func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSigstoreSignedKeyData(keyData, signedIdentity)
+}
+
+// Compile-time check that prSigstoreSigned implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
+ *pr = prSigstoreSigned{}
+ var tmp prSigstoreSigned
+ var gotKeyPath, gotKeyData = false, false
+ var signedIdentity json.RawMessage
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
+ switch key {
+ case "type":
+ return &tmp.Type
+ case "keyPath":
+ gotKeyPath = true
+ return &tmp.KeyPath
+ case "keyData":
+ gotKeyData = true
+ return &tmp.KeyData
+ case "signedIdentity":
+ return &signedIdentity
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeSigstoreSigned {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ if signedIdentity == nil {
+ tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
+ } else {
+ si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
+ if err != nil {
+ return err
+ }
+ tmp.SignedIdentity = si
+ }
+
+ var res *prSigstoreSigned
+ var err error
+ switch {
+ case gotKeyPath && gotKeyData:
+ return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+ case gotKeyPath && !gotKeyData:
+ res, err = newPRSigstoreSignedKeyPath(tmp.KeyPath, tmp.SignedIdentity)
+ case !gotKeyPath && gotKeyData:
+ res, err = newPRSigstoreSignedKeyData(tmp.KeyData, tmp.SignedIdentity)
+ case !gotKeyPath && !gotKeyData:
+ return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
+ default: // Coverage: This should never happen
+ return fmt.Errorf("Impossible keyPath/keyData presence combination!?")
+ }
+ if err != nil {
+ // Coverage: This cannot currently happen, creating a prSigstoreSigned only fails
+ // if signedIdentity is nil, which we replace with a default above.
+ return err
+ }
+ *pr = *res
+
+ return nil
+}
+
// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
var typeField prmCommon
@@ -537,7 +665,7 @@ var _ json.Unmarshaler = (*prmMatchExact)(nil)
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchExact{}
var tmp prmMatchExact
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@@ -567,7 +695,7 @@ var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepoDigestOrExact{}
var tmp prmMatchRepoDigestOrExact
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@@ -597,7 +725,7 @@ var _ json.Unmarshaler = (*prmMatchRepository)(nil)
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepository{}
var tmp prmMatchRepository
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@@ -637,7 +765,7 @@ var _ json.Unmarshaler = (*prmExactReference)(nil)
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
*prm = prmExactReference{}
var tmp prmExactReference
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
"dockerReference": &tmp.DockerReference,
}); err != nil {
@@ -679,7 +807,7 @@ var _ json.Unmarshaler = (*prmExactRepository)(nil)
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
*prm = prmExactRepository{}
var tmp prmExactRepository
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
"dockerRepository": &tmp.DockerRepository,
}); err != nil {
@@ -751,7 +879,7 @@ var _ json.Unmarshaler = (*prmRemapIdentity)(nil)
func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
*prm = prmRemapIdentity{}
var tmp prmRemapIdentity
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
"prefix": &tmp.Prefix,
"signedPrefix": &tmp.SignedPrefix,
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go
index edcbf52f4..2edf8397c 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go
@@ -7,9 +7,11 @@ package signature
import (
"context"
+ "fmt"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/unparsedimage"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -55,14 +57,14 @@ type PolicyRequirement interface {
// a container based on this image; use IsRunningImageAllowed instead.
// - Just because a signature is accepted does not automatically mean the contents of the
// signature are authorized to run code as root, or to affect system or cluster configuration.
- isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+ isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
// isRunningImageAllowed returns true if the requirement allows running an image.
// If it returns false, err must be non-nil, and should be an PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// WARNING: This validates signatures and the manifest, but does not download or validate the
// layers. Users must validate that the layers match their expected digests.
- isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error)
+ isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error)
}
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
@@ -71,7 +73,7 @@ type PolicyReferenceMatch interface {
// matchesDockerReference decides whether a specific image identity is accepted for an image
// (or, usually, for the image's Reference().DockerReference()). Note that
// image.Reference().DockerReference() may be nil.
- matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
+ matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool
}
// PolicyContext encapsulates a policy and possible cached state
@@ -95,7 +97,7 @@ const (
// changeContextState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
if pc.state != expected {
- return errors.Errorf(`"Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+ return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
}
pc.state = new
return nil
@@ -174,7 +176,7 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
// a container based on this image; use IsRunningImageAllowed instead.
// - Just because a signature is accepted does not automatically mean the contents of the
// signature are authorized to run code as root, or to affect system or cluster configuration.
-func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, image types.UnparsedImage) (sigs []*Signature, finalErr error) {
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) {
if err := pc.changeState(pcReady, pcInUse); err != nil {
return nil, err
}
@@ -185,11 +187,12 @@ func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, im
}
}()
+ image := unparsedimage.FromPublic(publicImage)
+
logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
reqs := pc.requirementsForImageRef(image.Reference())
- // FIXME: rename Signatures to UnverifiedSignatures
- // FIXME: pass context.Context
+ // FIXME: Use image.UntrustedSignatures, use that to improve error messages (needs tests!)
unverifiedSignatures, err := image.Signatures(ctx)
if err != nil {
return nil, err
@@ -255,7 +258,7 @@ func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, im
// succeeded but the result was rejection.
// WARNING: This validates signatures and the manifest, but does not download or validate the
// layers. Users must validate that the layers match their expected digests.
-func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (res bool, finalErr error) {
+func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, publicImage types.UnparsedImage) (res bool, finalErr error) {
if err := pc.changeState(pcReady, pcInUse); err != nil {
return false, err
}
@@ -266,6 +269,8 @@ func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.
}
}()
+ image := unparsedimage.FromPublic(publicImage)
+
logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference()))
reqs := pc.requirementsForImageRef(image.Reference())
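
The public entry points still take types.UnparsedImage; unparsedimage.FromPublic wraps it into the private view internally, so callers are unaffected. A minimal caller sketch (the policy path is a placeholder; ctx and img are assumed to be in scope):

	policy, err := signature.NewPolicyFromFile("/etc/containers/policy.json") // hypothetical path
	if err != nil {
		return err
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		return err
	}
	defer pc.Destroy()
	allowed, err := pc.IsRunningImageAllowed(ctx, img) // img is any types.UnparsedImage
	if err != nil {
		return err
	}
	_ = allowed
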
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
index 55cdd3054..a8bc01301 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
@@ -5,15 +5,15 @@ package signature
import (
"context"
- "github.com/containers/image/v5/types"
+ "github.com/containers/image/v5/internal/private"
"github.com/sirupsen/logrus"
)
-func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
return sarUnknown, nil, nil
}
-func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
// FIXME? Reject this at policy parsing time already?
logrus.Errorf("signedBaseLayer not implemented yet!")
return false, PolicyRequirementError("signedBaseLayer not implemented yet!")
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
index 65e825973..ef98b8b83 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
@@ -4,44 +4,59 @@ package signature
import (
"context"
+ "errors"
"fmt"
"os"
"strings"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
-func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
switch pr.KeyType {
case SBKeyTypeGPGKeys:
case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
// FIXME? Reject this at policy parsing time already?
- return sarRejected, nil, errors.Errorf(`"Unimplemented "keyType" value "%s"`, string(pr.KeyType))
+ return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
default:
// This should never happen, newPRSignedBy ensures KeyType.IsValid()
- return sarRejected, nil, errors.Errorf(`"Unknown "keyType" value "%s"`, string(pr.KeyType))
+ return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
}
- if pr.KeyPath != "" && pr.KeyData != nil {
- return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
- }
// FIXME: move this to per-context initialization
- var data []byte
- if pr.KeyData != nil {
- data = pr.KeyData
- } else {
+ var data [][]byte
+ keySources := 0
+ if pr.KeyPath != "" {
+ keySources++
d, err := os.ReadFile(pr.KeyPath)
if err != nil {
return sarRejected, nil, err
}
- data = d
+ data = [][]byte{d}
+ }
+ if pr.KeyPaths != nil {
+ keySources++
+ data = [][]byte{}
+ for _, path := range pr.KeyPaths {
+ d, err := os.ReadFile(path)
+ if err != nil {
+ return sarRejected, nil, err
+ }
+ data = append(data, d)
+ }
+ }
+ if pr.KeyData != nil {
+ keySources++
+ data = [][]byte{pr.KeyData}
+ }
+ if keySources != 1 {
+ return sarRejected, nil, errors.New(`Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`)
}
// FIXME: move this to per-context initialization
- mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
+ mech, trustedIdentities, err := newEphemeralGPGSigningMechanism(data)
if err != nil {
return sarRejected, nil, err
}
@@ -89,8 +104,9 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types
return sarAccepted, signature, nil
}
-func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
- // FIXME: pass context.Context
+func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
+ // FIXME: Use image.UntrustedSignatures, use that to improve error messages
+ // (needs tests!)
sigs, err := image.Signatures(ctx)
if err != nil {
return false, err
@@ -108,7 +124,7 @@ func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.Unp
// Huh?! This should not happen at all; treat it as any other invalid value.
fallthrough
default:
- reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
+ reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
}
rejections = append(rejections, reason)
}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
new file mode 100644
index 000000000..ccf1d80ac
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
@@ -0,0 +1,140 @@
+// Policy evaluation for prSigstoreSigned.
+
+package signature
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ // We don’t know of a single user of this API, and we might return unexpected values in Signature.
+ // For now, just punt.
+ return sarRejected, nil, errors.New("isSignatureAuthorAccepted is not implemented for sigstore")
+}
+
+func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image private.UnparsedImage, sig signature.Sigstore) (signatureAcceptanceResult, error) {
+ if pr.KeyPath != "" && pr.KeyData != nil {
+ return sarRejected, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
+ }
+ // FIXME: move this to per-context initialization
+ var publicKeyPEM []byte
+ if pr.KeyData != nil {
+ publicKeyPEM = pr.KeyData
+ } else {
+ d, err := os.ReadFile(pr.KeyPath)
+ if err != nil {
+ return sarRejected, err
+ }
+ publicKeyPEM = d
+ }
+
+ publicKey, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM)
+ if err != nil {
+ return sarRejected, fmt.Errorf("parsing public key: %w", err)
+ }
+
+ untrustedAnnotations := sig.UntrustedAnnotations()
+ untrustedBase64Signature, ok := untrustedAnnotations[signature.SigstoreSignatureAnnotationKey]
+ if !ok {
+ return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSignatureAnnotationKey)
+ }
+
+ signature, err := internal.VerifySigstorePayload(publicKey, sig.UntrustedPayload(), untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
+ ValidateSignedDockerReference: func(ref string) error {
+ if !pr.SignedIdentity.matchesDockerReference(image, ref) {
+ return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
+ }
+ return nil
+ },
+ ValidateSignedDockerManifestDigest: func(digest digest.Digest) error {
+ m, _, err := image.Manifest(ctx)
+ if err != nil {
+ return err
+ }
+ digestMatches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return err
+ }
+ if !digestMatches {
+ return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
+ }
+ return nil
+ },
+ })
+ if err != nil {
+ return sarRejected, err
+ }
+ if signature == nil { // A paranoid sanity check that VerifySigstorePayload has returned consistent values
+ return sarRejected, errors.New("internal error: VerifySigstorePayload succeeded but returned no data") // Coverage: This should never happen.
+ }
+
+ return sarAccepted, nil
+}
+
+func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
+ sigs, err := image.UntrustedSignatures(ctx)
+ if err != nil {
+ return false, err
+ }
+ var rejections []error
+ foundNonSigstoreSignatures := 0
+ foundSigstoreNonAttachments := 0
+ for _, s := range sigs {
+ sigstoreSig, ok := s.(signature.Sigstore)
+ if !ok {
+ foundNonSigstoreSignatures++
+ continue
+ }
+ if sigstoreSig.UntrustedMIMEType() != signature.SigstoreSignatureMIMEType {
+ foundSigstoreNonAttachments++
+ continue
+ }
+
+ var reason error
+ switch res, err := pr.isSignatureAccepted(ctx, image, sigstoreSig); res {
+ case sarAccepted:
+ // One accepted signature is enough.
+ return true, nil
+ case sarRejected:
+ reason = err
+ case sarUnknown:
+ // Huh?! This should not happen at all; treat it as any other invalid value.
+ fallthrough
+ default:
+ reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
+ }
+ rejections = append(rejections, reason)
+ }
+ var summary error
+ switch len(rejections) {
+ case 0:
+ if foundNonSigstoreSignatures == 0 && foundSigstoreNonAttachments == 0 {
+ // A nice message for the most common case.
+ summary = PolicyRequirementError("A signature was required, but no signature exists")
+ } else {
+ summary = PolicyRequirementError(fmt.Sprintf("A signature was required, but no signature exists (%d non-sigstore signatures, %d sigstore non-signature attachments)",
+ foundNonSigstoreSignatures, foundSigstoreNonAttachments))
+ }
+ case 1:
+ summary = rejections[0]
+ default:
+ var msgs []string
+ for _, e := range rejections {
+ msgs = append(msgs, e.Error())
+ }
+ summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
+ strings.Join(msgs, "; ")))
+ }
+ return false, summary
+}
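
The key-loading step in isSignatureAccepted delegates PEM parsing to sigstore's cryptoutils; in isolation that step looks like the following sketch (the key path is a hypothetical placeholder):

	publicKeyPEM, err := os.ReadFile("/etc/keys/cosign.pub") // hypothetical path
	if err != nil {
		return err
	}
	publicKey, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM)
	if err != nil {
		return fmt.Errorf("parsing public key: %w", err)
	}
	_ = publicKey // a crypto.PublicKey, ready for internal.VerifySigstorePayload
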
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
index f949088b5..031866f0d 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
@@ -6,24 +6,24 @@ import (
"context"
"fmt"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/transports"
- "github.com/containers/image/v5/types"
)
-func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
// prInsecureAcceptAnything semantics: Every image is allowed to run,
// but this does not consider the signature as verified.
return sarUnknown, nil, nil
}
-func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
return true, nil
}
-func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
}
-func (pr *prReject) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+func (pr *prReject) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
index 064866cf6..4e70c0f2e 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
@@ -7,12 +7,12 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/transports"
- "github.com/containers/image/v5/types"
)
// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
-func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+func parseImageAndDockerReference(image private.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
r1 := image.Reference().DockerReference()
if r1 == nil {
return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
@@ -25,7 +25,7 @@ func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (referen
return r1, r2, nil
}
-func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmMatchExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
@@ -56,7 +56,7 @@ func matchRepoDigestOrExactReferenceValues(intended, signature reference.Named)
return false
}
}
-func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
@@ -64,7 +64,7 @@ func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.Unparse
return matchRepoDigestOrExactReferenceValues(intended, signature)
}
-func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmMatchRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
@@ -85,7 +85,7 @@ func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, err
return r1, r2, nil
}
-func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmExactReference) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
if err != nil {
return false
@@ -97,7 +97,7 @@ func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage,
return signature.String() == intended.String()
}
-func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmExactRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
if err != nil {
return false
@@ -141,7 +141,7 @@ func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (referenc
return newParsedRef, nil
}
-func (prm *prmRemapIdentity) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmRemapIdentity) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go
index c6819929b..9e837452a 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_types.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_types.go
@@ -46,6 +46,7 @@ const (
prTypeReject prTypeIdentifier = "reject"
prTypeSignedBy prTypeIdentifier = "signedBy"
prTypeSignedBaseLayer prTypeIdentifier = "signedBaseLayer"
+ prTypeSigstoreSigned prTypeIdentifier = "sigstoreSigned"
)
// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
@@ -66,18 +67,20 @@ type prReject struct {
type prSignedBy struct {
prCommon
- // KeyType specifies what kind of key reference KeyPath/KeyData is.
+ // KeyType specifies what kind of key reference KeyPath/KeyPaths/KeyData is.
// Acceptable values are “GPGKeys” | “signedByGPGKeys” “X.509Certificates” | “signedByX.509CAs”
// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
KeyType sbKeyType `json:"keyType"`
- // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
+ // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
KeyPath string `json:"keyPath,omitempty"`
- // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+ // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+ KeyPaths []string `json:"keyPaths,omitempty"`
+ // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified.
KeyData []byte `json:"keyData,omitempty"`
// SignedIdentity specifies what image identity the signature must be claiming about the image.
- // Defaults to "match-exact" if not specified.
+ // Defaults to "matchRepoDigestOrExact" if not specified.
SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
}
@@ -104,6 +107,24 @@ type prSignedBaseLayer struct {
BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
}
+// prSigstoreSigned is a PolicyRequirement with type = prTypeSigstoreSigned: the image is signed by trusted keys for a specified identity
+type prSigstoreSigned struct {
+ prCommon
+
+ // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath and KeyData must be specified.
+ KeyPath string `json:"keyPath,omitempty"`
+ // KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+ KeyData []byte `json:"keyData,omitempty"`
+ // FIXME: Multiple public keys?
+
+ // FIXME: Support fulcio+rekor as an alternative.
+
+ // SignedIdentity specifies what image identity the signature must be claiming about the image.
+ // Defaults to "matchRepoDigestOrExact" if not specified.
+ // Note that /usr/bin/cosign interoperability might require using repo-only matching.
+ SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
// The type is public, but its implementation is private.
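
In policy.json terms, the new fields look like the following sketch, which parses with NewPolicyFromBytes; the registry scopes and key paths are placeholders, and signedIdentity is omitted so both requirements default to matchRepoDigestOrExact:

	policyJSON := []byte(`{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "registry.example.com/multi": [
	                {"type": "signedBy", "keyType": "GPGKeys",
	                 "keyPaths": ["/etc/keys/release-1.gpg", "/etc/keys/release-2.gpg"]}
	            ],
	            "registry.example.com/cosigned": [
	                {"type": "sigstoreSigned", "keyPath": "/etc/keys/cosign.pub"}
	            ]
	        }
	    }
	}`)
	policy, err := signature.NewPolicyFromBytes(policyJSON)
	if err != nil {
		return err
	}
	_ = policy
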
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
new file mode 100644
index 000000000..dbc03ec0a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
@@ -0,0 +1,70 @@
+package sigstore
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+
+ "github.com/sigstore/sigstore/pkg/signature"
+ "github.com/theupdateframework/go-tuf/encrypted"
+)
+
+// The following code was copied from github.com/sigstore.
+// FIXME: Eliminate that duplication.
+
+// Copyright 2021 The Sigstore Authors.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+const (
+ // from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType
+ sigstorePrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
+)
+
+// from sigstore/cosign/pkg/cosign.loadPrivateKey
+// FIXME: Do we need all of these key formats, and all of those?
+func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
+ // Decrypt first
+ p, _ := pem.Decode(key)
+ if p == nil {
+ return nil, errors.New("invalid pem block")
+ }
+ if p.Type != sigstorePrivateKeyPemType {
+ return nil, fmt.Errorf("unsupported pem type: %s", p.Type)
+ }
+
+ x509Encoded, err := encrypted.Decrypt(p.Bytes, pass)
+ if err != nil {
+ return nil, fmt.Errorf("decrypt: %w", err)
+ }
+
+ pk, err := x509.ParsePKCS8PrivateKey(x509Encoded)
+ if err != nil {
+ return nil, fmt.Errorf("parsing private key: %w", err)
+ }
+ switch pk := pk.(type) {
+ case *rsa.PrivateKey:
+ return signature.LoadRSAPKCS1v15SignerVerifier(pk, crypto.SHA256)
+ case *ecdsa.PrivateKey:
+ return signature.LoadECDSASignerVerifier(pk, crypto.SHA256)
+ case ed25519.PrivateKey:
+ return signature.LoadED25519SignerVerifier(pk)
+ default:
+ return nil, errors.New("unsupported key type")
+ }
+}
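
For tests, the inverse of loadPrivateKey is handy: generating a key in the cosign on-disk format. A sketch under the assumption that go-tuf's encrypted.Encrypt is the counterpart of the Decrypt call above (it would additionally need the crypto/ecdsa, crypto/elliptic and crypto/rand imports; the helper name is hypothetical):

// generateTestKeyPEM is a hypothetical test helper producing a key blob
// that loadPrivateKey can consume.
func generateTestKeyPEM(pass []byte) ([]byte, error) {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	x509Encoded, err := x509.MarshalPKCS8PrivateKey(priv)
	if err != nil {
		return nil, err
	}
	encBytes, err := encrypted.Encrypt(x509Encoded, pass) // passphrase-encrypt the PKCS #8 bytes
	if err != nil {
		return nil, err
	}
	return pem.EncodeToMemory(&pem.Block{
		Type:  sigstorePrivateKeyPemType,
		Bytes: encBytes,
	}), nil
}
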
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/sign.go b/vendor/github.com/containers/image/v5/signature/sigstore/sign.go
new file mode 100644
index 000000000..daa6ab387
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/sign.go
@@ -0,0 +1,65 @@
+package sigstore
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
+ sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+)
+
+// SignDockerManifestWithPrivateKeyFileUnstable returns a signature for manifest as the specified dockerReference,
+// using a private key and an optional passphrase.
+//
+// Yes, this returns an internal type, and should currently not be used outside of c/image.
+// There is NO COMMITMENT TO A STABLE API.
+func SignDockerManifestWithPrivateKeyFileUnstable(m []byte, dockerReference reference.Named, privateKeyFile string, passphrase []byte) (signature.Sigstore, error) {
+ privateKeyPEM, err := os.ReadFile(privateKeyFile)
+ if err != nil {
+ return signature.Sigstore{}, fmt.Errorf("reading private key from %s: %w", privateKeyFile, err)
+ }
+ signer, err := loadPrivateKey(privateKeyPEM, passphrase)
+ if err != nil {
+ return signature.Sigstore{}, fmt.Errorf("initializing private key: %w", err)
+ }
+
+ return signDockerManifest(m, dockerReference, signer)
+}
+
+func signDockerManifest(m []byte, dockerReference reference.Named, signer sigstoreSignature.Signer) (signature.Sigstore, error) {
+ if reference.IsNameOnly(dockerReference) {
+ return signature.Sigstore{}, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
+ }
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return signature.Sigstore{}, err
+ }
+ // sigstore/cosign completely ignores dockerReference for actual policy decisions.
+ // They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks.
+ // So, just do what simple signing does, and cosign won’t mind.
+ payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String())
+ payloadBytes, err := json.Marshal(payloadData)
+ if err != nil {
+ return signature.Sigstore{}, err
+ }
+
+ // github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(),
+ // which seems to be not used by anything. So we don’t bother.
+ signatureBytes, err := signer.SignMessage(bytes.NewReader(payloadBytes))
+ if err != nil {
+ return signature.Sigstore{}, fmt.Errorf("creating signature: %w", err)
+ }
+ base64Signature := base64.StdEncoding.EncodeToString(signatureBytes)
+
+ return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType,
+ payloadBytes,
+ map[string]string{
+ signature.SigstoreSignatureAnnotationKey: base64Signature,
+ }), nil
+}
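
A caller-side sketch, subject to the stability warning above; manifestBytes, the reference, the key path, and the passphrase are hypothetical placeholders:

	ref, err := reference.ParseNormalizedNamed("registry.example.com/app:latest") // hypothetical
	if err != nil {
		return err
	}
	sig, err := sigstore.SignDockerManifestWithPrivateKeyFileUnstable(
		manifestBytes, ref, "/etc/keys/cosign.key", []byte("passphrase")) // hypothetical inputs
	if err != nil {
		return err
	}
	_ = sig.UntrustedPayload()     // the simple-signing-style JSON payload
	_ = sig.UntrustedAnnotations() // carries the base64 signature annotation
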
diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/simple.go
index 05bf8229e..1ca571e5a 100644
--- a/vendor/github.com/containers/image/v5/signature/signature.go
+++ b/vendor/github.com/containers/image/v5/signature/simple.go
@@ -6,12 +6,13 @@ package signature
import (
"encoding/json"
+ "errors"
"fmt"
"time"
+ "github.com/containers/image/v5/signature/internal"
"github.com/containers/image/v5/version"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
const (
@@ -19,13 +20,7 @@ const (
)
// InvalidSignatureError is returned when parsing an invalid signature.
-type InvalidSignatureError struct {
- msg string
-}
-
-func (err InvalidSignatureError) Error() string {
- return err.msg
-}
+type InvalidSignatureError = internal.InvalidSignatureError
// Signature is a parsed content of a signature.
// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
@@ -111,18 +106,18 @@ var _ json.Unmarshaler = (*untrustedSignature)(nil)
func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
err := s.strictUnmarshalJSON(data)
if err != nil {
- if formatErr, ok := err.(jsonFormatError); ok {
- err = InvalidSignatureError{msg: formatErr.Error()}
+ if formatErr, ok := err.(internal.JSONFormatError); ok {
+ err = internal.NewInvalidSignatureError(formatErr.Error())
}
}
return err
}
-// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
-// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal.JSONFormatError error type.
+// Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
var critical, optional json.RawMessage
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"critical": &critical,
"optional": &optional,
}); err != nil {
@@ -132,7 +127,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
var creatorID string
var timestamp float64
var gotCreatorID, gotTimestamp = false, false
- if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
+ if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
switch key {
case "creator":
gotCreatorID = true
@@ -153,14 +148,14 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
if gotTimestamp {
intTimestamp := int64(timestamp)
if float64(intTimestamp) != timestamp {
- return InvalidSignatureError{msg: "Field optional.timestamp is not is not an integer"}
+ return internal.NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
}
s.UntrustedTimestamp = &intTimestamp
}
var t string
var image, identity json.RawMessage
- if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
"type": &t,
"image": &image,
"identity": &identity,
@@ -168,18 +163,18 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
return err
}
if t != signatureType {
- return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
}
var digestString string
- if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
"docker-manifest-digest": &digestString,
}); err != nil {
return err
}
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
- return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+ return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
"docker-reference": &s.UntrustedDockerReference,
})
}
@@ -231,7 +226,7 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte
var unmatchedSignature untrustedSignature
if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
- return nil, InvalidSignatureError{msg: err.Error()}
+ return nil, internal.NewInvalidSignatureError(err.Error())
}
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
return nil, err
@@ -269,7 +264,7 @@ func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []
}
var untrustedDecodedContents untrustedSignature
if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
- return nil, InvalidSignatureError{msg: err.Error()}
+ return nil, internal.NewInvalidSignatureError(err.Error())
}
var timestamp *time.Time // = nil
diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go
new file mode 100644
index 000000000..ae3bfa8fa
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go
@@ -0,0 +1,924 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ graphdriver "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chunked"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob
+ // with a digest-based name that doesn't match its contents.
+ // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value),
+ // and there is no known user of this error.
+ ErrBlobDigestMismatch = errors.New("blob digest mismatch")
+ // ErrBlobSizeMismatch is returned when PutBlob() is given a blob
+ // with an expected size that doesn't match the reader.
+ ErrBlobSizeMismatch = errors.New("blob size mismatch")
+)
+
+type storageImageDestination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.ImplementsPutBlobPartial
+ stubs.AlwaysSupportsSignatures
+
+ imageRef storageReference
+ directory string // Temporary directory where we store blobs until Commit() time
+ nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
+ manifest []byte // Manifest contents, temporary
+ manifestDigest digest.Digest // Valid if len(manifest) != 0
+ signatures []byte // Signature contents, temporary
+ signatureses map[digest.Digest][]byte // Instance signature contents, temporary
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
+
+ // A storage destination may be used concurrently. Accesses are
+ // serialized via a mutex. Please refer to the individual comments
+ // below for details.
+ lock sync.Mutex
+ // Mapping from layer (by index) to the associated ID in the storage.
+ // It's protected *implicitly* since `commitLayer()`, at any given
+ // time, can only be executed by *one* goroutine. Please refer to
+ // `queueOrCommit()` for further details on how the single-caller
+ // guarantee is implemented.
+ indexToStorageID map[int]*string
+ // All accesses to below data are protected by `lock` which is made
+ // *explicit* in the code.
+ blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
+ fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
+ filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
+ currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
+ indexToPulledLayerInfo map[int]*manifest.LayerInfo // Mapping from layer (by index) to pulled down blob
+ blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
+ diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
+}
+
+// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
+// it's time to Commit() the image
+func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
+ directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
+ if err != nil {
+ return nil, fmt.Errorf("creating a temporary directory: %w", err)
+ }
+ dest := &storageImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{
+ imgspecv1.MediaTypeImageManifest,
+ manifest.DockerV2Schema2MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ },
+ // We ultimately have to decompress layers to populate trees on disk
+ // and need to explicitly ask for it here, so that the layers' MIME
+ // types can be set accordingly.
+ DesiredLayerCompression: types.PreserveOriginal,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: true,
+ IgnoresEmbeddedDockerReference: true, // Yes, we want the unmodified manifest
+ HasThreadSafePutBlob: true,
+ }),
+
+ imageRef: imageRef,
+ directory: directory,
+ signatureses: make(map[digest.Digest][]byte),
+ blobDiffIDs: make(map[digest.Digest]digest.Digest),
+ blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
+ fileSizes: make(map[digest.Digest]int64),
+ filenames: make(map[digest.Digest]string),
+ SignatureSizes: []int{},
+ SignaturesSizes: make(map[digest.Digest][]int),
+ indexToStorageID: make(map[int]*string),
+ indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo),
+ diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
+ }
+ dest.Compat = impl.AddCompat(dest)
+ return dest, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (s *storageImageDestination) Reference() types.ImageReference {
+ return s.imageRef
+}
+
+// Close cleans up the temporary directory and additional layer store handlers.
+func (s *storageImageDestination) Close() error {
+ for _, al := range s.blobAdditionalLayer {
+ al.Release()
+ }
+ for _, v := range s.diffOutputs {
+ if v.Target != "" {
+ _ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target)
+ }
+ }
+ return os.RemoveAll(s.directory)
+}
+
+func (s *storageImageDestination) computeNextBlobCacheFile() string {
+ return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options)
+ if err != nil {
+ return info, err
+ }
+
+ if options.IsConfig || options.LayerIndex == nil {
+ return info, nil
+ }
+
+ return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+}
+
+// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
+// The caller must arrange for the blob to eventually be committed using s.commitLayer().
+func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
+ // Stores a layer or data blob in our temporary directory, checking that any information
+ // in the blobinfo matches the incoming data.
+ errorBlobInfo := types.BlobInfo{
+ Digest: "",
+ Size: -1,
+ }
+ if blobinfo.Digest != "" {
+ if err := blobinfo.Digest.Validate(); err != nil {
+ return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
+ }
+ }
+
+ // Set up to digest the blob if necessary, and count its size while saving it to a file.
+ filename := s.computeNextBlobCacheFile()
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+ if err != nil {
+ return errorBlobInfo, fmt.Errorf("creating temporary file %q: %w", filename, err)
+ }
+ defer file.Close()
+ counter := ioutils.NewWriteCounter(file)
+ stream = io.TeeReader(stream, counter)
+ digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
+ decompressed, err := archive.DecompressStream(stream)
+ if err != nil {
+ return errorBlobInfo, fmt.Errorf("setting up to decompress blob: %w", err)
+ }
+
+ diffID := digest.Canonical.Digester()
+ // Copy the data to the file.
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ _, err = io.Copy(diffID.Hash(), decompressed)
+ decompressed.Close()
+ if err != nil {
+ return errorBlobInfo, fmt.Errorf("storing blob to file %q: %w", filename, err)
+ }
+
+ // Determine blob properties, and fail if information that we were given about the blob
+ // is known to be incorrect.
+ blobDigest := digester.Digest()
+ blobSize := blobinfo.Size
+ if blobSize < 0 {
+ blobSize = counter.Count
+ } else if blobinfo.Size != counter.Count {
+ return errorBlobInfo, ErrBlobSizeMismatch
+ }
+
+ // Record information about the blob.
+ s.lock.Lock()
+ s.blobDiffIDs[blobDigest] = diffID.Digest()
+ s.fileSizes[blobDigest] = counter.Count
+ s.filenames[blobDigest] = filename
+ s.lock.Unlock()
+ // This is safe because we have just computed diffID, and blobDigest was either computed
+ // by us, or validated by the caller (usually copy.digestingReader).
+ options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
+ return types.BlobInfo{
+ Digest: blobDigest,
+ Size: blobSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+}
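putBlobToPendingFile does its digesting, size counting, and DiffID computation in a single pass over the stream. A condensed sketch of that pipeline, using the same helpers as the code above (the function name is illustrative, and unlike the vendored code this version always digests the blob instead of skipping the work when the digest is already known):

// digestAndStore tees the incoming stream into file while hashing the raw
// bytes (blob digest) and, after transparent decompression, the expanded
// bytes (DiffID), all in one pass.
func digestAndStore(stream io.Reader, file *os.File) (blob, diffID digest.Digest, size int64, err error) {
	counter := ioutils.NewWriteCounter(file) // counts the bytes written to file
	blobDigester := digest.Canonical.Digester()
	tee := io.TeeReader(io.TeeReader(stream, counter), blobDigester.Hash())
	decompressed, err := archive.DecompressStream(tee) // handles gzip/zstd/uncompressed input
	if err != nil {
		return "", "", -1, err
	}
	defer decompressed.Close()
	diffDigester := digest.Canonical.Digester()
	if _, err := io.Copy(diffDigester.Hash(), decompressed); err != nil {
		return "", "", -1, err
	}
	return blobDigester.Digest(), diffDigester.Digest(), counter.Count, nil
}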
+
+type zstdFetcher struct {
+ chunkAccessor private.BlobChunkAccessor
+ ctx context.Context
+ blobInfo types.BlobInfo
+}
+
+// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt.
+func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ var newChunks []private.ImageSourceChunk
+ for _, v := range chunks {
+ i := private.ImageSourceChunk{
+ Offset: v.Offset,
+ Length: v.Length,
+ }
+ newChunks = append(newChunks, i)
+ }
+ rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks)
+ if _, ok := err.(private.BadPartialRequestError); ok {
+ err = chunked.ErrBadRequest{}
+ }
+ return rc, errs, err
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+ fetcher := zstdFetcher{
+ chunkAccessor: chunkAccessor,
+ ctx: ctx,
+ blobInfo: srcInfo,
+ }
+
+ differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
+ if err != nil {
+ return srcInfo, err
+ }
+
+ out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
+ if err != nil {
+ return srcInfo, err
+ }
+
+ blobDigest := srcInfo.Digest
+
+ s.lock.Lock()
+ s.blobDiffIDs[blobDigest] = blobDigest
+ s.fileSizes[blobDigest] = 0
+ s.filenames[blobDigest] = ""
+ s.diffOutputs[blobDigest] = out
+ s.lock.Unlock()
+
+ return srcInfo, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+ reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options)
+ if err != nil || !reused || options.LayerIndex == nil {
+ return reused, info, err
+ }
+
+ return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+}
+
+// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
+// The caller must arrange for the blob to eventually be committed using s.commitLayer().
+func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+ // lock the entire method as it executes fairly quickly
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ if options.SrcRef != nil {
+ // Check if we have the layer in the underlying additional layer store.
+ aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String())
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobinfo.Digest, err)
+ } else if err == nil {
+ // Record the uncompressed value so that we can use it to calculate layer IDs.
+ s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
+ s.blobAdditionalLayer[blobinfo.Digest] = aLayer
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: aLayer.CompressedSize(),
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+ }
+
+ if blobinfo.Digest == "" {
+ return false, types.BlobInfo{}, errors.New(`Can not check for a blob with unknown digest`)
+ }
+ if err := blobinfo.Digest.Validate(); err != nil {
+ return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
+ }
+
+ // Check if we've already cached it in a file.
+ if size, ok := s.fileSizes[blobinfo.Digest]; ok {
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: size,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+
+ // Check if we have an uncompressed ("wasn't-compressed") layer in storage that's based on that blob.
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobinfo.Digest, err)
+ }
+ if len(layers) > 0 {
+ // Save this for completeness.
+ s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: layers[0].UncompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+
+ // Check if we have a compressed ("was-compressed") layer in storage that's based on that blob.
+ layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobinfo.Digest, err)
+ }
+ if len(layers) > 0 {
+ // Record the uncompressed value so that we can use it to calculate layer IDs.
+ s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: layers[0].CompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+
+ // Does the blob correspond to a known DiffID which we already have available?
+ // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
+ // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
+ if options.CanSubstitute || blobinfo.Size != -1 {
+ if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
+ }
+ if len(layers) > 0 {
+ if blobinfo.Size != -1 {
+ s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+ return true, blobinfo, nil
+ }
+ if !options.CanSubstitute {
+ return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo)
+ }
+ s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
+ return true, types.BlobInfo{
+ Digest: uncompressedDigest,
+ Size: layers[0].UncompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+ }
+ }
+
+ // Nope, we don't have it.
+ return false, types.BlobInfo{}, nil
+}
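The last lookup above is the only one that may change the digest the caller asked about. A compact sketch of that substitution step in isolation (the function name is illustrative; error handling is collapsed so any lookup failure counts as a miss):

// substituteUncompressed reports whether an already-present uncompressed
// layer can stand in for the requested blob.
func substituteUncompressed(cache blobinfocache.BlobInfoCache2, store storage.Store, blob types.BlobInfo) (types.BlobInfo, bool) {
	uncompressed := cache.UncompressedDigest(blob.Digest)
	if uncompressed == "" || uncompressed == blob.Digest {
		return types.BlobInfo{}, false // nothing known, or no substitution needed
	}
	layers, err := store.LayersByUncompressedDigest(uncompressed)
	if err != nil || len(layers) == 0 {
		return types.BlobInfo{}, false
	}
	return types.BlobInfo{
		Digest:    uncompressed,
		Size:      layers[0].UncompressedSize,
		MediaType: blob.MediaType,
	}, true
}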
+
+// computeID computes a recommended image ID based on information we have so far. If
+// the manifest is not of a type that we recognize, we return an empty value, indicating
+// that since we don't have a recommendation, a random ID should be used if one needs
+// to be allocated.
+func (s *storageImageDestination) computeID(m manifest.Manifest) string {
+ // Build the diffID list. We need the decompressed sums that we've been calculating to
+ // fill in the DiffIDs. It's expected (but not enforced by us) that the number of
+ // diffIDs corresponds to the number of non-EmptyLayer entries in the history.
+ var diffIDs []digest.Digest
+ switch m := m.(type) {
+ case *manifest.Schema1:
+ // Build a list of the diffIDs we've generated for the non-throwaway FS layers,
+ // in reverse of the order in which they were originally listed.
+ for i, compat := range m.ExtractedV1Compatibility {
+ if compat.ThrowAway {
+ continue
+ }
+ blobSum := m.FSLayers[i].BlobSum
+ diffID, ok := s.blobDiffIDs[blobSum]
+ if !ok {
+ logrus.Infof("error looking up diffID for layer %q", blobSum.String())
+ return ""
+ }
+ diffIDs = append([]digest.Digest{diffID}, diffIDs...)
+ }
+ case *manifest.Schema2, *manifest.OCI1:
+ // We know the ID calculation for these formats doesn't actually use the diffIDs,
+ // so we don't need to populate the diffID list.
+ default:
+ return ""
+ }
+ id, err := m.ImageID(diffIDs)
+ if err != nil {
+ return ""
+ }
+ return id
+}
+
+// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
+// information out of it for Inspect().
+func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
+ if info.Digest == "" {
+ return nil, errors.New(`no digest supplied when reading blob`)
+ }
+ if err := info.Digest.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err)
+ }
+ // Assume it's a file, since we're only calling this from a place that expects to read files.
+ if filename, ok := s.filenames[info.Digest]; ok {
+ contents, err2 := os.ReadFile(filename)
+ if err2 != nil {
+ return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err2)
+ }
+ return contents, nil
+ }
+ // If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
+ return nil, errors.New("blob not found")
+}
+
+// queueOrCommit queues the specified blob to be committed to the storage.
+// If no other goroutine is already committing layers, the layer and all
+// subsequent layers (if already queued) will be committed to the storage.
+func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error {
+ // NOTE: whenever the code below is touched, make sure that all code
+ // paths unlock the lock, and that they unlock it exactly once.
+ //
+ // Conceptually, the code is divided in two stages:
+ //
+ // 1) Queue in work by marking the layer as ready to be committed.
+ // If at least one previous/parent layer with a lower index has
+ // not yet been committed, return early.
+ //
+ // 2) Process the queued-in work by committing the "ready" layers
+ // in sequence. Make sure that more items can be queued-in
+ // during the comparatively I/O expensive task of committing a
+ // layer.
+ //
+ // The conceptual benefit of this design is that the caller can continue
+ // pulling layers after an early return. At any given time, only one
+ // caller is the "worker" routine committing layers. All other routines
+ // can continue pulling and queuing in layers.
+ s.lock.Lock()
+ s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{
+ BlobInfo: blob,
+ EmptyLayer: emptyLayer,
+ }
+
+ // We're still waiting for at least one previous/parent layer to be
+ // committed, so there's nothing to do.
+ if index != s.currentIndex {
+ s.lock.Unlock()
+ return nil
+ }
+
+ for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] {
+ s.lock.Unlock()
+ // Note: commitLayer locks on-demand.
+ if err := s.commitLayer(ctx, *info, index); err != nil {
+ return err
+ }
+ s.lock.Lock()
+ index++
+ }
+
+ // Set the index at the very end to make sure that only one routine
+ // enters stage 2).
+ s.currentIndex = index
+ s.lock.Unlock()
+ return nil
+}
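Stripped of the image-specific details, this is a lock-protected queue with a single drainer, roughly as below (hypothetical types, for illustration only):

// sequencer is the queue-or-commit skeleton: producers record work under
// the lock; the goroutine whose index matches the cursor drains in order.
type sequencer struct {
	mu      sync.Mutex
	next    int                  // index of the next item to run
	pending map[int]func() error // queued work, by index
}

func newSequencer() *sequencer {
	return &sequencer{pending: make(map[int]func() error)}
}

func (q *sequencer) queueOrRun(index int, work func() error) error {
	q.mu.Lock()
	q.pending[index] = work
	if index != q.next { // an earlier item is still outstanding; just queue
		q.mu.Unlock()
		return nil
	}
	for w := q.pending[index]; w != nil; w = q.pending[index] {
		q.mu.Unlock() // run the expensive step without holding the lock
		if err := w(); err != nil {
			return err
		}
		q.mu.Lock()
		index++
	}
	q.next = index // advance the cursor only at the very end
	q.mu.Unlock()
	return nil
}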
+
+// commitLayer commits the specified blob with the given index to the storage.
+// Note that the previous layer is expected to already be committed.
+//
+// Caution: this function must be called without holding `s.lock`. Callers
+// must guarantee that, at any given time, at most one goroutine may execute
+// `commitLayer()`.
+func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error {
+ // Already committed? Return early.
+ if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
+ return nil
+ }
+
+ // Start with an empty string or the previous layer ID. Note that
+ // `s.indexToStorageID` can only be accessed by *one* goroutine at any
+ // given time. Hence, we don't need to lock accesses.
+ var lastLayer string
+ if prev := s.indexToStorageID[index-1]; prev != nil {
+ lastLayer = *prev
+ }
+
+ // Carry over the previous ID for empty non-base layers.
+ if blob.EmptyLayer {
+ s.indexToStorageID[index] = &lastLayer
+ return nil
+ }
+
+ // Check if there's already a layer with the ID that we'd give to the result of applying
+ // this layer blob to its parent, if it has one, or the blob's hex value otherwise.
+ s.lock.Lock()
+ diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
+ s.lock.Unlock()
+ if !haveDiffID {
+ // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
+ // or to even check if we had it.
+ // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+ // that relies on using a blob digest that has never been seen by the store had better call
+ // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
+ // so far we are going to accommodate that (if we should be doing that at all).
+ logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
+ // NOTE: use `TryReusingBlob` to prevent recursion.
+ has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
+ if err != nil {
+ return fmt.Errorf("checking for a layer based on blob %q: %w", blob.Digest.String(), err)
+ }
+ if !has {
+ return fmt.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
+ }
+ diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
+ if !haveDiffID {
+ return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
+ }
+ }
+ id := diffID.Hex()
+ if lastLayer != "" {
+ id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
+ }
+ if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
+ // There's already a layer that should have the right contents, just reuse it.
+ lastLayer = layer.ID
+ s.indexToStorageID[index] = &lastLayer
+ return nil
+ }
+
+ s.lock.Lock()
+ diffOutput, ok := s.diffOutputs[blob.Digest]
+ s.lock.Unlock()
+ if ok {
+ layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
+ if err != nil {
+ return err
+ }
+
+ // FIXME: what to do with the uncompressed digest?
+ diffOutput.UncompressedDigest = blob.Digest
+
+ if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
+ _ = s.imageRef.transport.store.Delete(layer.ID)
+ return err
+ }
+
+ s.indexToStorageID[index] = &layer.ID
+ return nil
+ }
+
+ s.lock.Lock()
+ al, ok := s.blobAdditionalLayer[blob.Digest]
+ s.lock.Unlock()
+ if ok {
+ layer, err := al.PutAs(id, lastLayer, nil)
+ if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
+ return fmt.Errorf("failed to put layer from digest and labels: %w", err)
+ }
+ lastLayer = layer.ID
+ s.indexToStorageID[index] = &lastLayer
+ return nil
+ }
+
+ // Check if we previously cached a file with that blob's contents. If we didn't,
+ // then we need to read the desired contents from a layer.
+ s.lock.Lock()
+ filename, ok := s.filenames[blob.Digest]
+ s.lock.Unlock()
+ if !ok {
+ // Try to find the layer with contents matching that blobsum.
+ layer := ""
+ layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
+ if err2 == nil && len(layers) > 0 {
+ layer = layers[0].ID
+ } else {
+ layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
+ if err2 == nil && len(layers) > 0 {
+ layer = layers[0].ID
+ }
+ }
+ if layer == "" {
+ return fmt.Errorf("locating layer for blob %q: %w", blob.Digest, err2)
+ }
+ // Read the layer's contents.
+ noCompression := archive.Uncompressed
+ diffOptions := &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
+ if err2 != nil {
+ return fmt.Errorf("reading layer %q for blob %q: %w", layer, blob.Digest, err2)
+ }
+ // Copy the layer diff to a file. Diff() takes a lock that it holds
+ // until the ReadCloser that it returns is closed, and PutLayer() wants
+ // the same lock, so the diff can't just be directly streamed from one
+ // to the other.
+ filename = s.computeNextBlobCacheFile()
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+ if err != nil {
+ diff.Close()
+ return fmt.Errorf("creating temporary file %q: %w", filename, err)
+ }
+ // Copy the data to the file.
+ // TODO: This can take quite some time, and should ideally be cancellable using
+ // ctx.Done().
+ _, err = io.Copy(file, diff)
+ diff.Close()
+ file.Close()
+ if err != nil {
+ return fmt.Errorf("storing blob to file %q: %w", filename, err)
+ }
+ // Make sure that we can find this file later, should we need the layer's
+ // contents again.
+ s.lock.Lock()
+ s.filenames[blob.Digest] = filename
+ s.lock.Unlock()
+ }
+ // Read the cached blob and use it as a diff.
+ file, err := os.Open(filename)
+ if err != nil {
+ return fmt.Errorf("opening file %q: %w", filename, err)
+ }
+ defer file.Close()
+ // Build the new layer using the diff, regardless of where it came from.
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
+ OriginalDigest: blob.Digest,
+ UncompressedDigest: diffID,
+ }, file)
+ if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
+ return fmt.Errorf("adding layer with blob %q: %w", blob.Digest, err)
+ }
+
+ s.indexToStorageID[index] = &layer.ID
+ return nil
+}
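The ID chaining used above is worth calling out: a base layer's ID is just the hex of its DiffID, and each child's ID hashes the parent ID together with the child's DiffID, so identical layer stacks deterministically converge on identical IDs regardless of which image pulled them first. As a sketch (the function name is illustrative):

// chainedLayerID reproduces the ID arithmetic in commitLayer: base layers
// use the DiffID's hex, children hash "<parentID>+<diffID hex>".
func chainedLayerID(parentID string, diffID digest.Digest) string {
	if parentID == "" {
		return diffID.Hex()
	}
	return digest.Canonical.FromBytes([]byte(parentID + "+" + diffID.Hex())).Hex()
}

Because the chain depends only on the DiffIDs, two images that share a layer prefix end up reusing the same layer records in the store.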
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ if len(s.manifest) == 0 {
+ return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
+ }
+ toplevelManifest, _, err := unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return fmt.Errorf("retrieving top-level manifest: %w", err)
+ }
+ // If the name we're saving to includes a digest, then check that the
+ // manifests that we're about to save all either match the one from the
+ // unparsedToplevel, or match the digest in the name that we're using.
+ if s.imageRef.named != nil {
+ if digested, ok := s.imageRef.named.(reference.Digested); ok {
+ matches, err := manifest.MatchesDigest(s.manifest, digested.Digest())
+ if err != nil {
+ return err
+ }
+ if !matches {
+ matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest())
+ if err != nil {
+ return err
+ }
+ }
+ if !matches {
+ return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest())
+ }
+ }
+ }
+ // Find the list of layer blobs.
+ man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
+ if err != nil {
+ return fmt.Errorf("parsing manifest: %w", err)
+ }
+ layerBlobs := man.LayerInfos()
+
+ // Extract, commit, or find the layers.
+ for i, blob := range layerBlobs {
+ if err := s.commitLayer(ctx, blob, i); err != nil {
+ return err
+ }
+ }
+ var lastLayer string
+ if len(layerBlobs) > 0 { // len(layerBlobs) == 0 can happen when using caches
+ prev := s.indexToStorageID[len(layerBlobs)-1]
+ if prev == nil {
+ return fmt.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
+ }
+ lastLayer = *prev
+ }
+
+ // If one of those blobs was a configuration blob, then we can try to dig out the date when the image
+ // was originally created, in case we're just copying it. If not, no harm done.
+ options := &storage.ImageOptions{}
+ if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil {
+ logrus.Debugf("setting image creation date to %s", inspect.Created)
+ options.CreationDate = *inspect.Created
+ }
+ // Create the image record, pointing to the most-recently added layer.
+ intendedID := s.imageRef.id
+ if intendedID == "" {
+ intendedID = s.computeID(man)
+ }
+ oldNames := []string{}
+ img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
+ if err != nil {
+ if !errors.Is(err, storage.ErrDuplicateID) {
+ logrus.Debugf("error creating image: %q", err)
+ return fmt.Errorf("creating image %q: %w", intendedID, err)
+ }
+ img, err = s.imageRef.transport.store.Image(intendedID)
+ if err != nil {
+ return fmt.Errorf("reading image %q: %w", intendedID, err)
+ }
+ if img.TopLayer != lastLayer {
+ logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID)
+ return fmt.Errorf("image with ID %q already exists, but uses a different top layer: %w", intendedID, storage.ErrDuplicateID)
+ }
+ logrus.Debugf("reusing image ID %q", img.ID)
+ oldNames = append(oldNames, img.Names...)
+ } else {
+ logrus.Debugf("created new image ID %q", img.ID)
+ }
+
+ // Clean up the unfinished image on any error.
+ // (Is this the right thing to do if the image has existed before?)
+ commitSucceeded := false
+ defer func() {
+ if !commitSucceeded {
+ logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames)
+ if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil {
+ logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err)
+ }
+ }
+ }()
+
+ // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so
+ // we just need to screen out the ones that are actually layers to get the list of non-layers.
+ dataBlobs := make(map[digest.Digest]struct{})
+ for blob := range s.filenames {
+ dataBlobs[blob] = struct{}{}
+ }
+ for _, layerBlob := range layerBlobs {
+ delete(dataBlobs, layerBlob.Digest)
+ }
+ for blob := range dataBlobs {
+ v, err := os.ReadFile(s.filenames[blob])
+ if err != nil {
+ return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
+ }
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil {
+ logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err)
+ return fmt.Errorf("saving big data %q for image %q: %w", blob.String(), img.ID, err)
+ }
+ }
+ // Save the unparsedToplevel's manifest if it differs from the per-platform one, which is saved below.
+ if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
+ manifestDigest, err := manifest.Digest(toplevelManifest)
+ if err != nil {
+ return fmt.Errorf("digesting top-level manifest: %w", err)
+ }
+ key := manifestBigDataKey(manifestDigest)
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil {
+ logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving top-level manifest for image %q: %w", img.ID, err)
+ }
+ }
+ // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
+ // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
+ // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
+ key := manifestBigDataKey(s.manifestDigest)
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
+ logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving manifest for image %q: %w", img.ID, err)
+ }
+ key = storage.ImageDigestBigDataKey
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
+ logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving manifest for image %q: %w", img.ID, err)
+ }
+ // Save the signatures, if we have any.
+ if len(s.signatures) > 0 {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
+ logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving signatures for image %q: %w", img.ID, err)
+ }
+ }
+ for instanceDigest, signatures := range s.signatureses {
+ key := signatureBigDataKey(instanceDigest)
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil {
+ logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving signatures for image %q: %w", img.ID, err)
+ }
+ }
+ // Save our metadata.
+ metadata, err := json.Marshal(s)
+ if err != nil {
+ logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
+ return fmt.Errorf("encoding metadata for image %q: %w", img.ID, err)
+ }
+ if len(metadata) != 0 {
+ if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
+ logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving metadata for image %q: %w", img.ID, err)
+ }
+ logrus.Debugf("saved image metadata %q", string(metadata))
+ }
+ // Add the reference's name to the image. We don't need to worry about avoiding duplicate
+ // values because AddNames() will deduplicate the list that we pass to it.
+ if name := s.imageRef.DockerReference(); name != nil {
+ if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil {
+ return fmt.Errorf("adding names %v to image %q: %w", name, img.ID, err)
+ }
+ logrus.Debugf("added name %q to image %q", name, img.ID)
+ }
+
+ commitSucceeded = true
+ return nil
+}
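Note the convention Commit uses when saving the manifest: it is written twice, once under a digest-specific big-data key and once under the store-wide default key. A sketch of the key pair for a given manifest digest (manifestBigDataKey is this package's own helper, used above; the wrapper function here is illustrative):

// manifestKeys returns the two big-data keys a manifest is saved under: a
// digest-specific one, and the default key for digest-agnostic readers.
func manifestKeys(manifestDigest digest.Digest) []string {
	return []string{
		manifestBigDataKey(manifestDigest),
		storage.ImageDigestBigDataKey,
	}
}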
+
+// PutManifest writes the manifest to the destination.
+func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
+ digest, err := manifest.Digest(manifestBlob)
+ if err != nil {
+ return err
+ }
+ newBlob := make([]byte, len(manifestBlob))
+ copy(newBlob, manifestBlob)
+ s.manifest = newBlob
+ s.manifestDigest = digest
+ return nil
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ sizes := []int{}
+ sigblob := []byte{}
+ for _, sigWithFormat := range signatures {
+ sig, err := signature.Blob(sigWithFormat)
+ if err != nil {
+ return err
+ }
+ sizes = append(sizes, len(sig))
+ newblob := make([]byte, len(sigblob)+len(sig))
+ copy(newblob, sigblob)
+ copy(newblob[len(sigblob):], sig)
+ sigblob = newblob
+ }
+ if instanceDigest == nil {
+ s.signatures = sigblob
+ s.SignatureSizes = sizes
+ if len(s.manifest) > 0 {
+ manifestDigest := s.manifestDigest
+ instanceDigest = &manifestDigest
+ }
+ }
+ if instanceDigest != nil {
+ s.signatureses[*instanceDigest] = sigblob
+ s.SignaturesSizes[*instanceDigest] = sizes
+ }
+ return nil
+}
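PutSignaturesWithFormat flattens all signatures into one blob and records the individual lengths in SignatureSizes/SignaturesSizes; that sizes list is what lets a reader carve the blob apart again. A sketch of the inverse operation, mirroring what the source side's GetSignatures does (the function name is illustrative; assumes fmt):

// splitSignatures undoes the concatenation in PutSignaturesWithFormat by
// slicing the single blob back into individual signatures.
func splitSignatures(blob []byte, sizes []int) ([][]byte, error) {
	out := make([][]byte, 0, len(sizes))
	offset := 0
	for _, size := range sizes {
		if offset+size > len(blob) {
			return nil, fmt.Errorf("signature data truncated: need %d bytes, have %d", offset+size, len(blob))
		}
		out = append(out, blob[offset:offset+size])
		offset += size
	}
	if offset != len(blob) {
		return nil, fmt.Errorf("signature data has %d trailing bytes", len(blob)-offset)
	}
	return out, nil
}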
diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go
index 06f90d363..9f16dd334 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_image.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_image.go
@@ -4,93 +4,20 @@
package storage
import (
- "bytes"
"context"
- "encoding/json"
- stderrors "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "sync"
- "sync/atomic"
- "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
- "github.com/containers/image/v5/internal/private"
- "github.com/containers/image/v5/internal/putblobdigest"
- "github.com/containers/image/v5/internal/tmpdir"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
- graphdriver "github.com/containers/storage/drivers"
- "github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/chunked"
- "github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
var (
- // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob
- // with a digest-based name that doesn't match its contents.
- // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value),
- // and there is no known user of this error.
- ErrBlobDigestMismatch = stderrors.New("blob digest mismatch")
- // ErrBlobSizeMismatch is returned when PutBlob() is given a blob
- // with an expected size that doesn't match the reader.
- ErrBlobSizeMismatch = stderrors.New("blob size mismatch")
// ErrNoSuchImage is returned when we attempt to access an image which
// doesn't exist in the storage area.
ErrNoSuchImage = storage.ErrNotAnImage
)
-type storageImageSource struct {
- imageRef storageReference
- image *storage.Image
- systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
- layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
- cachedManifest []byte // A cached copy of the manifest, if already known, or nil
- getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
- SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
- SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
-}
-
-type storageImageDestination struct {
- imageRef storageReference
- directory string // Temporary directory where we store blobs until Commit() time
- nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
- manifest []byte // Manifest contents, temporary
- manifestDigest digest.Digest // Valid if len(manifest) != 0
- signatures []byte // Signature contents, temporary
- signatureses map[digest.Digest][]byte // Instance signature contents, temporary
- SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
- SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
-
- // A storage destination may be used concurrently. Accesses are
- // serialized via a mutex. Please refer to the individual comments
- // below for details.
- lock sync.Mutex
- // Mapping from layer (by index) to the associated ID in the storage.
- // It's protected *implicitly* since `commitLayer()`, at any given
- // time, can only be executed by *one* goroutine. Please refer to
- // `queueOrCommit()` for further details on how the single-caller
- // guarantee is implemented.
- indexToStorageID map[int]*string
- // All accesses to below data are protected by `lock` which is made
- // *explicit* in the code.
- blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
- fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
- filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
- currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
- indexToPulledLayerInfo map[int]*manifest.LayerInfo // Mapping from layer (by index) to pulled down blob
- blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
- diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
-}
-
type storageImageCloser struct {
types.ImageCloser
size int64
@@ -109,1230 +36,6 @@ func signatureBigDataKey(digest digest.Digest) string {
return "signature-" + digest.Encoded()
}
-// newImageSource sets up an image for reading.
-func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
- // First, locate the image.
- img, err := imageRef.resolveImage(sys)
- if err != nil {
- return nil, err
- }
-
- // Build the reader object.
- image := &storageImageSource{
- imageRef: imageRef,
- systemContext: sys,
- image: img,
- layerPosition: make(map[digest.Digest]int),
- SignatureSizes: []int{},
- SignaturesSizes: make(map[digest.Digest][]int),
- }
- if img.Metadata != "" {
- if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
- return nil, errors.Wrap(err, "decoding metadata for source image")
- }
- }
- return image, nil
-}
-
-// Reference returns the image reference that we used to find this image.
-func (s *storageImageSource) Reference() types.ImageReference {
- return s.imageRef
-}
-
-// Close cleans up any resources we tied up while reading the image.
-func (s *storageImageSource) Close() error {
- return nil
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *storageImageSource) HasThreadSafeGetBlob() bool {
- return true
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
- if info.Digest == image.GzippedEmptyLayerDigest {
- return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
- }
-
- // NOTE: the blob is first written to a temporary file and subsequently
- // closed. The intention is to keep the time we own the storage lock
- // as short as possible to allow other processes to access the storage.
- rc, n, _, err = s.getBlobAndLayerID(info)
- if err != nil {
- return nil, 0, err
- }
- defer rc.Close()
-
- tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
- if err != nil {
- return nil, 0, err
- }
-
- if _, err := io.Copy(tmpFile, rc); err != nil {
- return nil, 0, err
- }
-
- if _, err := tmpFile.Seek(0, 0); err != nil {
- return nil, 0, err
- }
-
- wrapper := ioutils.NewReadCloserWrapper(tmpFile, func() error {
- defer os.Remove(tmpFile.Name())
- return tmpFile.Close()
- })
-
- return wrapper, n, err
-}
-
-// getBlobAndLayer reads the data blob or filesystem layer which matches the digest and size, if given.
-func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
- var layer storage.Layer
- var diffOptions *storage.DiffOptions
- // We need a valid digest value.
- err = info.Digest.Validate()
- if err != nil {
- return nil, -1, "", err
- }
- // Check if the blob corresponds to a diff that was used to initialize any layers. Our
- // callers should try to retrieve layers using their uncompressed digests, so no need to
- // check if they're using one of the compressed digests, which we can't reproduce anyway.
- layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
-
- // If it's not a layer, then it must be a data item.
- if len(layers) == 0 {
- b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
- if err != nil {
- return nil, -1, "", err
- }
- r := bytes.NewReader(b)
- logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
- return io.NopCloser(r), int64(r.Len()), "", nil
- }
- // Step through the list of matching layers. Tests may want to verify that if we have multiple layers
- // which claim to have the same contents, that we actually do have multiple layers, otherwise we could
- // just go ahead and use the first one every time.
- s.getBlobMutex.Lock()
- i := s.layerPosition[info.Digest]
- s.layerPosition[info.Digest] = i + 1
- s.getBlobMutex.Unlock()
- if len(layers) > 0 {
- layer = layers[i%len(layers)]
- }
- // Force the storage layer to not try to match any compression that was used when the layer was first
- // handed to it.
- noCompression := archive.Uncompressed
- diffOptions = &storage.DiffOptions{
- Compression: &noCompression,
- }
- if layer.UncompressedSize < 0 {
- n = -1
- } else {
- n = layer.UncompressedSize
- }
- logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
- rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
- if err != nil {
- return nil, -1, "", err
- }
- return rc, n, layer.ID, err
-}
-
-// GetManifest() reads the image's manifest.
-func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
- if instanceDigest != nil {
- key := manifestBigDataKey(*instanceDigest)
- blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
- if err != nil {
- return nil, "", errors.Wrapf(err, "reading manifest for image instance %q", *instanceDigest)
- }
- return blob, manifest.GuessMIMEType(blob), err
- }
- if len(s.cachedManifest) == 0 {
- // The manifest is stored as a big data item.
- // Prefer the manifest corresponding to the user-specified digest, if available.
- if s.imageRef.named != nil {
- if digested, ok := s.imageRef.named.(reference.Digested); ok {
- key := manifestBigDataKey(digested.Digest())
- blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
- if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
- return nil, "", err
- }
- if err == nil {
- s.cachedManifest = blob
- }
- }
- }
- // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
- // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
- if len(s.cachedManifest) == 0 {
- cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
- if err != nil {
- return nil, "", err
- }
- s.cachedManifest = cachedBlob
- }
- }
- return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
-}
-
-// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
-// the image, after they've been decompressed.
-func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
- manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest)
- if err != nil {
- return nil, errors.Wrapf(err, "reading image manifest for %q", s.image.ID)
- }
- if manifest.MIMETypeIsMultiImage(manifestType) {
- return nil, errors.Errorf("can't copy layers for a manifest list (shouldn't be attempted)")
- }
- man, err := manifest.FromBlob(manifestBlob, manifestType)
- if err != nil {
- return nil, errors.Wrapf(err, "parsing image manifest for %q", s.image.ID)
- }
-
- uncompressedLayerType := ""
- switch manifestType {
- case imgspecv1.MediaTypeImageManifest:
- uncompressedLayerType = imgspecv1.MediaTypeImageLayer
- case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
- uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
- }
-
- physicalBlobInfos := []types.BlobInfo{}
- layerID := s.image.TopLayer
- for layerID != "" {
- layer, err := s.imageRef.transport.store.Layer(layerID)
- if err != nil {
- return nil, errors.Wrapf(err, "reading layer %q in image %q", layerID, s.image.ID)
- }
- if layer.UncompressedDigest == "" {
- return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID)
- }
- if layer.UncompressedSize < 0 {
- return nil, errors.Errorf("uncompressed size for layer %q is unknown", layerID)
- }
- blobInfo := types.BlobInfo{
- Digest: layer.UncompressedDigest,
- Size: layer.UncompressedSize,
- MediaType: uncompressedLayerType,
- }
- physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
- layerID = layer.Parent
- }
-
- res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
- if err != nil {
- return nil, errors.Wrapf(err, "creating LayerInfosForCopy of image %q", s.image.ID)
- }
- return res, nil
-}
-
-// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest,
-// but using layer data which we can actually produce — physicalInfos for non-empty layers,
-// and image.GzippedEmptyLayer for empty ones.
-// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.)
-func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
- nextPhysical := 0
- res := make([]types.BlobInfo, len(manifestInfos))
- for i, mi := range manifestInfos {
- if mi.EmptyLayer {
- res[i] = types.BlobInfo{
- Digest: image.GzippedEmptyLayerDigest,
- Size: int64(len(image.GzippedEmptyLayer)),
- MediaType: mi.MediaType,
- }
- } else {
- if nextPhysical >= len(physicalInfos) {
- return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
- }
- res[i] = physicalInfos[nextPhysical]
- nextPhysical++
- }
- }
- if nextPhysical != len(physicalInfos) {
- return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
- }
- return res, nil
-}
-
-// GetSignatures() parses the image's signatures blob into a slice of byte slices.
-func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
- var offset int
- sigslice := [][]byte{}
- signature := []byte{}
- signatureSizes := s.SignatureSizes
- key := "signatures"
- instance := "default instance"
- if instanceDigest != nil {
- signatureSizes = s.SignaturesSizes[*instanceDigest]
- key = signatureBigDataKey(*instanceDigest)
- instance = instanceDigest.Encoded()
- }
- if len(signatureSizes) > 0 {
- signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
- if err != nil {
- return nil, errors.Wrapf(err, "looking up signatures data for image %q (%s)", s.image.ID, instance)
- }
- signature = signatureBlob
- }
- for _, length := range signatureSizes {
- if offset+length > len(signature) {
- return nil, errors.Wrapf(err, "looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, len(signature), offset+length)
- }
- sigslice = append(sigslice, signature[offset:offset+length])
- offset += length
- }
- if offset != len(signature) {
- return nil, errors.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signatures)-offset)
- }
- return sigslice, nil
-}
-
-// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
-// it's time to Commit() the image
-func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
- directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
- if err != nil {
- return nil, errors.Wrapf(err, "creating a temporary directory")
- }
- image := &storageImageDestination{
- imageRef: imageRef,
- directory: directory,
- signatureses: make(map[digest.Digest][]byte),
- blobDiffIDs: make(map[digest.Digest]digest.Digest),
- blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
- fileSizes: make(map[digest.Digest]int64),
- filenames: make(map[digest.Digest]string),
- SignatureSizes: []int{},
- SignaturesSizes: make(map[digest.Digest][]int),
- indexToStorageID: make(map[int]*string),
- indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo),
- diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
- }
- return image, nil
-}
-
-// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (s *storageImageDestination) Reference() types.ImageReference {
- return s.imageRef
-}
-
-// Close cleans up the temporary directory and additional layer store handlers.
-func (s *storageImageDestination) Close() error {
- for _, al := range s.blobAdditionalLayer {
- al.Release()
- }
- for _, v := range s.diffOutputs {
- if v.Target != "" {
- _ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target)
- }
- }
- return os.RemoveAll(s.directory)
-}
-
-func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
- // We ultimately have to decompress layers to populate trees on disk
- // and need to explicitly ask for it here, so that the layers' MIME
- // types can be set accordingly.
- return types.PreserveOriginal
-}
-
-func (s *storageImageDestination) computeNextBlobCacheFile() string {
- return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (s *storageImageDestination) HasThreadSafePutBlob() bool {
- return true
-}
-
-// PutBlobWithOptions writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
-// inputInfo.Size is the expected length of stream, if known.
-// inputInfo.MediaType describes the blob format, if known.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
- info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options)
- if err != nil {
- return info, err
- }
-
- if options.IsConfig || options.LayerIndex == nil {
- return info, nil
- }
-
- return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
-}
-
-// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
-// inputInfo.Size is the expected length of stream, if known.
-// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- return s.PutBlobWithOptions(ctx, stream, blobinfo, private.PutBlobOptions{
- Cache: cache,
- IsConfig: isConfig,
- })
-}
-
-// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
-// The caller must arrange the blob to be eventually committed using s.commitLayer().
-func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
- // Stores a layer or data blob in our temporary directory, checking that any information
- // in the blobinfo matches the incoming data.
- errorBlobInfo := types.BlobInfo{
- Digest: "",
- Size: -1,
- }
- if blobinfo.Digest != "" {
- if err := blobinfo.Digest.Validate(); err != nil {
- return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
- }
- }
-
- // Set up to digest the blob if necessary, and count its size while saving it to a file.
- filename := s.computeNextBlobCacheFile()
- file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
- if err != nil {
- return errorBlobInfo, errors.Wrapf(err, "creating temporary file %q", filename)
- }
- defer file.Close()
- counter := ioutils.NewWriteCounter(file)
- stream = io.TeeReader(stream, counter)
- digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
- decompressed, err := archive.DecompressStream(stream)
- if err != nil {
- return errorBlobInfo, errors.Wrap(err, "setting up to decompress blob")
- }
-
- diffID := digest.Canonical.Digester()
- // Copy the data to the file.
- // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- _, err = io.Copy(diffID.Hash(), decompressed)
- decompressed.Close()
- if err != nil {
- return errorBlobInfo, errors.Wrapf(err, "storing blob to file %q", filename)
- }
-
- // Determine blob properties, and fail if information that we were given about the blob
- // is known to be incorrect.
- blobDigest := digester.Digest()
- blobSize := blobinfo.Size
- if blobSize < 0 {
- blobSize = counter.Count
- } else if blobinfo.Size != counter.Count {
- return errorBlobInfo, errors.WithStack(ErrBlobSizeMismatch)
- }
-
- // Record information about the blob.
- s.lock.Lock()
- s.blobDiffIDs[blobDigest] = diffID.Digest()
- s.fileSizes[blobDigest] = counter.Count
- s.filenames[blobDigest] = filename
- s.lock.Unlock()
- // This is safe because we have just computed diffID, and blobDigest was either computed
- // by us, or validated by the caller (usually copy.digestingReader).
- options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
- return types.BlobInfo{
- Digest: blobDigest,
- Size: blobSize,
- MediaType: blobinfo.MediaType,
- }, nil
-}
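
The pending-file path above persists, size-counts, and digests a blob in a single pass by stacking readers: a write counter tees off the raw bytes, a digester hashes them, and the decompressed stream feeds the diffID. A minimal sketch of that single-pass pattern, using only go-digest and the standard library (saveAndDigest is an illustrative name, not a c/image API):

package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// saveAndDigest writes stream to path while computing its digest and size
// in the same pass, via io.TeeReader.
func saveAndDigest(stream io.Reader, path string) (digest.Digest, int64, error) {
	file, err := os.Create(path)
	if err != nil {
		return "", -1, err
	}
	defer file.Close()

	digester := digest.Canonical.Digester()
	// Everything read from stream is also fed into the digester's hash.
	tee := io.TeeReader(stream, digester.Hash())
	n, err := io.Copy(file, tee)
	if err != nil {
		return "", -1, err
	}
	return digester.Digest(), n, nil
}

func main() {
	d, n, err := saveAndDigest(strings.NewReader("hello"), filepath.Join(os.TempDir(), "blob"))
	if err != nil {
		panic(err)
	}
	fmt.Println(d, n)
}

The real function layers one more reader on top (archive.DecompressStream), so the compressed digest and the uncompressed diffID both come out of the same read.
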
-
-type zstdFetcher struct {
- chunkAccessor private.BlobChunkAccessor
- ctx context.Context
- blobInfo types.BlobInfo
-}
-
-// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt.
-func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
- var newChunks []private.ImageSourceChunk
- for _, v := range chunks {
- i := private.ImageSourceChunk{
- Offset: v.Offset,
- Length: v.Length,
- }
- newChunks = append(newChunks, i)
- }
- rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks)
- if _, ok := err.(private.BadPartialRequestError); ok {
- err = chunked.ErrBadRequest{}
- }
- return rc, errs, err
-}
-
-// PutBlobPartial attempts to create a blob using the data that is already present
-// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
-// It is available only if SupportsPutBlobPartial().
-// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-// should fall back to PutBlobWithOptions.
-func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
- fetcher := zstdFetcher{
- chunkAccessor: chunkAccessor,
- ctx: ctx,
- blobInfo: srcInfo,
- }
-
- differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
- if err != nil {
- return srcInfo, err
- }
-
- out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
- if err != nil {
- return srcInfo, err
- }
-
- blobDigest := srcInfo.Digest
-
- s.lock.Lock()
- s.blobDiffIDs[blobDigest] = blobDigest
- s.fileSizes[blobDigest] = 0
- s.filenames[blobDigest] = ""
- s.diffOutputs[blobDigest] = out
- s.lock.Unlock()
-
- return srcInfo, nil
-}
-
-// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
-// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
- reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options)
- if err != nil || !reused || options.LayerIndex == nil {
- return reused, info, err
- }
-
- return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
-// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- return s.TryReusingBlobWithOptions(ctx, blobinfo, private.TryReusingBlobOptions{
- Cache: cache,
- CanSubstitute: canSubstitute,
- })
-}
-
-// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
-// The caller must arrange the blob to be eventually committed using s.commitLayer().
-func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
- // lock the entire method as it executes fairly quickly
- s.lock.Lock()
- defer s.lock.Unlock()
-
- if options.SrcRef != nil {
- // Check if we have the layer in the underlying additional layer store.
- aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String())
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest)
- } else if err == nil {
- // Record the uncompressed value so that we can use it to calculate layer IDs.
- s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
- s.blobAdditionalLayer[blobinfo.Digest] = aLayer
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: aLayer.CompressedSize(),
- MediaType: blobinfo.MediaType,
- }, nil
- }
- }
-
- if blobinfo.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
- }
- if err := blobinfo.Digest.Validate(); err != nil {
- return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
- }
-
- // Check if we've already cached it in a file.
- if size, ok := s.fileSizes[blobinfo.Digest]; ok {
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: size,
- MediaType: blobinfo.MediaType,
- }, nil
- }
-
- // Check if we have a wasn't-compressed layer in storage that's based on that blob.
- layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, blobinfo.Digest)
- }
- if len(layers) > 0 {
- // Save this for completeness.
- s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: layers[0].UncompressedSize,
- MediaType: blobinfo.MediaType,
- }, nil
- }
-
- // Check if we have a was-compressed layer in storage that's based on that blob.
- layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q`, blobinfo.Digest)
- }
- if len(layers) > 0 {
- // Record the uncompressed value so that we can use it to calculate layer IDs.
- s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: layers[0].CompressedSize,
- MediaType: blobinfo.MediaType,
- }, nil
- }
-
- // Does the blob correspond to a known DiffID which we already have available?
- // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
- // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
- if options.CanSubstitute || blobinfo.Size != -1 {
- if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
- layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, uncompressedDigest)
- }
- if len(layers) > 0 {
- if blobinfo.Size != -1 {
- s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, blobinfo, nil
- }
- if !options.CanSubstitute {
- return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo)
- }
- s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
- return true, types.BlobInfo{
- Digest: uncompressedDigest,
- Size: layers[0].UncompressedSize,
- MediaType: blobinfo.MediaType,
- }, nil
- }
- }
- }
-
- // Nope, we don't have it.
- return false, types.BlobInfo{}, nil
-}
-
-// computeID computes a recommended image ID based on information we have so far. If
-// the manifest is not of a type that we recognize, we return an empty value, indicating
-// that since we don't have a recommendation, a random ID should be used if one needs
-// to be allocated.
-func (s *storageImageDestination) computeID(m manifest.Manifest) string {
- // Build the diffID list. We need the decompressed sums that we've been calculating to
- // fill in the DiffIDs. It's expected (but not enforced by us) that the number of
- // diffIDs corresponds to the number of non-EmptyLayer entries in the history.
- var diffIDs []digest.Digest
- switch m := m.(type) {
- case *manifest.Schema1:
- // Build a list of the diffIDs we've generated for the non-throwaway FS layers,
- // in reverse of the order in which they were originally listed.
- for i, compat := range m.ExtractedV1Compatibility {
- if compat.ThrowAway {
- continue
- }
- blobSum := m.FSLayers[i].BlobSum
- diffID, ok := s.blobDiffIDs[blobSum]
- if !ok {
- logrus.Infof("error looking up diffID for layer %q", blobSum.String())
- return ""
- }
- diffIDs = append([]digest.Digest{diffID}, diffIDs...)
- }
- case *manifest.Schema2, *manifest.OCI1:
- // We know the ID calculation for these formats doesn't actually use the diffIDs,
- // so we don't need to populate the diffID list.
- default:
- return ""
- }
- id, err := m.ImageID(diffIDs)
- if err != nil {
- return ""
- }
- return id
-}
-
-// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
-// information out of it for Inspect().
-func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
- if info.Digest == "" {
- return nil, errors.Errorf(`no digest supplied when reading blob`)
- }
- if err := info.Digest.Validate(); err != nil {
- return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`)
- }
- // Assume it's a file, since we're only calling this from a place that expects to read files.
- if filename, ok := s.filenames[info.Digest]; ok {
- contents, err2 := os.ReadFile(filename)
- if err2 != nil {
- return nil, errors.Wrapf(err2, `reading blob from file %q`, filename)
- }
- return contents, nil
- }
- // If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
- return nil, errors.New("blob not found")
-}
-
-// queueOrCommit queues in the specified blob to be committed to the storage.
-// If no other goroutine is already committing layers, the layer and all
-// subsequent layers (if already queued) will be committed to the storage.
-func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error {
- // NOTE: whenever the code below is touched, make sure that all code
- // paths unlock the lock and to unlock it exactly once.
- //
- // Conceptually, the code is divided in two stages:
- //
- // 1) Queue in work by marking the layer as ready to be committed.
- // If at least one previous/parent layer with a lower index has
- // not yet been committed, return early.
- //
- // 2) Process the queued-in work by committing the "ready" layers
- // in sequence. Make sure that more items can be queued-in
- // during the comparatively I/O-expensive task of committing a
- // layer.
- //
- // The conceptual benefit of this design is that caller can continue
- // pulling layers after an early return. At any given time, only one
- // caller is the "worker" routine committing layers. All other routines
- // can continue pulling and queuing in layers.
- s.lock.Lock()
- s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{
- BlobInfo: blob,
- EmptyLayer: emptyLayer,
- }
-
- // We're still waiting for at least one previous/parent layer to be
- // committed, so there's nothing to do.
- if index != s.currentIndex {
- s.lock.Unlock()
- return nil
- }
-
- for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] {
- s.lock.Unlock()
- // Note: commitLayer locks on-demand.
- if err := s.commitLayer(ctx, *info, index); err != nil {
- return err
- }
- s.lock.Lock()
- index++
- }
-
- // Set the index at the very end to make sure that only one routine
- // enters stage 2).
- s.currentIndex = index
- s.lock.Unlock()
- return nil
-}
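
The two-stage scheme described in the comment reduces to a small concurrency model: every producer records its finished item under the lock, but only the producer holding the next expected index becomes the worker and drains the queue, releasing the lock around each expensive commit. A sketch under those assumptions (committer and its fields are hypothetical stand-ins, with fmt.Printf in place of commitLayer):

package main

import (
	"fmt"
	"sync"
)

// committer is a hypothetical stand-in for the destination: queued holds
// out-of-order results, next is the lowest index not yet committed.
type committer struct {
	mu     sync.Mutex
	queued map[int]string
	next   int
}

func (c *committer) queueOrCommit(index int, item string) {
	c.mu.Lock()
	c.queued[index] = item
	if index != c.next {
		// A lower index is still outstanding; whoever delivers it
		// will commit this item on our behalf.
		c.mu.Unlock()
		return
	}
	// We hold the next expected index: drain everything that is ready,
	// unlocking around the expensive per-item work.
	for work, ok := c.queued[index]; ok; work, ok = c.queued[index] {
		c.mu.Unlock()
		fmt.Printf("committing %d: %s\n", index, work)
		c.mu.Lock()
		index++
	}
	c.next = index
	c.mu.Unlock()
}

func main() {
	c := &committer{queued: make(map[int]string)}
	var wg sync.WaitGroup
	for i := 4; i >= 0; i-- {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			c.queueOrCommit(i, fmt.Sprintf("layer-%d", i))
		}(i)
	}
	wg.Wait()
}

Because the lock is dropped around the per-item work, other goroutines keep queuing higher indices while the worker drains, which is exactly the property the NOTE above is protecting.
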
-
-// commitLayer commits the specified blob with the given index to the storage.
-// Note that the previous layer is expected to already be committed.
-//
-// Caution: this function must be called without holding `s.lock`. Callers
-// must guarantee that, at any given time, at most one goroutine may execute
-// `commitLayer()`.
-func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error {
- // Already committed? Return early.
- if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
- return nil
- }
-
- // Start with an empty string or the previous layer ID. Note that
- // `s.indexToStorageID` can only be accessed by *one* goroutine at any
- // given time. Hence, we don't need to lock accesses.
- var lastLayer string
- if prev := s.indexToStorageID[index-1]; prev != nil {
- lastLayer = *prev
- }
-
- // Carry over the previous ID for empty non-base layers.
- if blob.EmptyLayer {
- s.indexToStorageID[index] = &lastLayer
- return nil
- }
-
- // Check if there's already a layer with the ID that we'd give to the result of applying
- // this layer blob to its parent, if it has one, or the blob's hex value otherwise.
- s.lock.Lock()
- diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
- s.lock.Unlock()
- if !haveDiffID {
- // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
- // or to even check if we had it.
- // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
- // that relies on using a blob digest that has never been seen by the store had better call
- // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
- // so far we are going to accommodate that (if we should be doing that at all).
- logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
- // NOTE: use `TryReusingBlob` to prevent recursion.
- has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
- if err != nil {
- return errors.Wrapf(err, "checking for a layer based on blob %q", blob.Digest.String())
- }
- if !has {
- return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
- }
- diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
- if !haveDiffID {
- return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
- }
- }
- id := diffID.Hex()
- if lastLayer != "" {
- id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
- }
- if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
- // There's already a layer that should have the right contents, just reuse it.
- lastLayer = layer.ID
- s.indexToStorageID[index] = &lastLayer
- return nil
- }
-
- s.lock.Lock()
- diffOutput, ok := s.diffOutputs[blob.Digest]
- s.lock.Unlock()
- if ok {
- layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
- if err != nil {
- return err
- }
-
- // FIXME: what to do with the uncompressed digest?
- diffOutput.UncompressedDigest = blob.Digest
-
- if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
- _ = s.imageRef.transport.store.Delete(layer.ID)
- return err
- }
-
- s.indexToStorageID[index] = &layer.ID
- return nil
- }
-
- s.lock.Lock()
- al, ok := s.blobAdditionalLayer[blob.Digest]
- s.lock.Unlock()
- if ok {
- layer, err := al.PutAs(id, lastLayer, nil)
- if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
- return errors.Wrapf(err, "failed to put layer from digest and labels")
- }
- lastLayer = layer.ID
- s.indexToStorageID[index] = &lastLayer
- return nil
- }
-
- // Check if we previously cached a file with that blob's contents. If we didn't,
- // then we need to read the desired contents from a layer.
- s.lock.Lock()
- filename, ok := s.filenames[blob.Digest]
- s.lock.Unlock()
- if !ok {
- // Try to find the layer with contents matching that blobsum.
- layer := ""
- layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
- if err2 == nil && len(layers) > 0 {
- layer = layers[0].ID
- } else {
- layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
- if err2 == nil && len(layers) > 0 {
- layer = layers[0].ID
- }
- }
- if layer == "" {
- return errors.Wrapf(err2, "locating layer for blob %q", blob.Digest)
- }
- // Read the layer's contents.
- noCompression := archive.Uncompressed
- diffOptions := &storage.DiffOptions{
- Compression: &noCompression,
- }
- diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
- if err2 != nil {
- return errors.Wrapf(err2, "reading layer %q for blob %q", layer, blob.Digest)
- }
- // Copy the layer diff to a file. Diff() takes a lock that it holds
- // until the ReadCloser that it returns is closed, and PutLayer() wants
- // the same lock, so the diff can't just be directly streamed from one
- // to the other.
- filename = s.computeNextBlobCacheFile()
- file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
- if err != nil {
- diff.Close()
- return errors.Wrapf(err, "creating temporary file %q", filename)
- }
- // Copy the data to the file.
- // TODO: This can take quite some time, and should ideally be cancellable using
- // ctx.Done().
- _, err = io.Copy(file, diff)
- diff.Close()
- file.Close()
- if err != nil {
- return errors.Wrapf(err, "storing blob to file %q", filename)
- }
- // Make sure that we can find this file later, should we need the layer's
- // contents again.
- s.lock.Lock()
- s.filenames[blob.Digest] = filename
- s.lock.Unlock()
- }
- // Read the cached blob and use it as a diff.
- file, err := os.Open(filename)
- if err != nil {
- return errors.Wrapf(err, "opening file %q", filename)
- }
- defer file.Close()
- // Build the new layer using the diff, regardless of where it came from.
- // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
- OriginalDigest: blob.Digest,
- UncompressedDigest: diffID,
- }, file)
- if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
- return errors.Wrapf(err, "adding layer with blob %q", blob.Digest)
- }
-
- s.indexToStorageID[index] = &layer.ID
- return nil
-}
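
The id computed above makes layer reuse deterministic: a base layer's ID is just its diffID hex, and every child layer hashes "<parentID>+<diffID hex>", so two pulls of the same stack land on the same storage IDs and the store.Layer(id) lookup can short-circuit the commit. A worked example of that chaining rule (chainedLayerID is an illustrative wrapper around the same digest call):

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// chainedLayerID reproduces the ID rule used by commitLayer: the base
// layer's ID is its diffID hex, and each child hashes "<parentID>+<diffID hex>".
func chainedLayerID(parentID string, diffID digest.Digest) string {
	if parentID == "" {
		return diffID.Hex()
	}
	return digest.Canonical.FromBytes([]byte(parentID + "+" + diffID.Hex())).Hex()
}

func main() {
	base := chainedLayerID("", digest.FromString("layer-0 diff"))
	child := chainedLayerID(base, digest.FromString("layer-1 diff"))
	// The same two diffs always produce the same pair of IDs, which is
	// what lets an existing layer be reused instead of re-applied.
	fmt.Println(base, child)
}
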
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-// original manifest list digest, if desired.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
- if len(s.manifest) == 0 {
- return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
- }
- toplevelManifest, _, err := unparsedToplevel.Manifest(ctx)
- if err != nil {
- return errors.Wrapf(err, "retrieving top-level manifest")
- }
- // If the name we're saving to includes a digest, then check that the
- // manifests that we're about to save all either match the one from the
- // unparsedToplevel, or match the digest in the name that we're using.
- if s.imageRef.named != nil {
- if digested, ok := s.imageRef.named.(reference.Digested); ok {
- matches, err := manifest.MatchesDigest(s.manifest, digested.Digest())
- if err != nil {
- return err
- }
- if !matches {
- matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest())
- if err != nil {
- return err
- }
- }
- if !matches {
- return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest())
- }
- }
- }
- // Find the list of layer blobs.
- man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
- if err != nil {
- return errors.Wrapf(err, "parsing manifest")
- }
- layerBlobs := man.LayerInfos()
-
- // Extract, commit, or find the layers.
- for i, blob := range layerBlobs {
- if err := s.commitLayer(ctx, blob, i); err != nil {
- return err
- }
- }
- var lastLayer string
- if len(layerBlobs) > 0 { // Can happen when using caches
- prev := s.indexToStorageID[len(layerBlobs)-1]
- if prev == nil {
- return errors.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
- }
- lastLayer = *prev
- }
-
- // If one of those blobs was a configuration blob, then we can try to dig out the date when the image
- // was originally created, in case we're just copying it. If not, no harm done.
- options := &storage.ImageOptions{}
- if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil {
- logrus.Debugf("setting image creation date to %s", inspect.Created)
- options.CreationDate = *inspect.Created
- }
- // Create the image record, pointing to the most-recently added layer.
- intendedID := s.imageRef.id
- if intendedID == "" {
- intendedID = s.computeID(man)
- }
- oldNames := []string{}
- img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
- if err != nil {
- if errors.Cause(err) != storage.ErrDuplicateID {
- logrus.Debugf("error creating image: %q", err)
- return errors.Wrapf(err, "creating image %q", intendedID)
- }
- img, err = s.imageRef.transport.store.Image(intendedID)
- if err != nil {
- return errors.Wrapf(err, "reading image %q", intendedID)
- }
- if img.TopLayer != lastLayer {
- logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID)
- return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID)
- }
- logrus.Debugf("reusing image ID %q", img.ID)
- oldNames = append(oldNames, img.Names...)
- } else {
- logrus.Debugf("created new image ID %q", img.ID)
- }
-
- // Clean up the unfinished image on any error.
- // (Is this the right thing to do if the image has existed before?)
- commitSucceeded := false
- defer func() {
- if !commitSucceeded {
- logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames)
- if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil {
- logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err)
- }
- }
- }()
-
- // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so
- // we just need to screen out the ones that are actually layers to get the list of non-layers.
- dataBlobs := make(map[digest.Digest]struct{})
- for blob := range s.filenames {
- dataBlobs[blob] = struct{}{}
- }
- for _, layerBlob := range layerBlobs {
- delete(dataBlobs, layerBlob.Digest)
- }
- for blob := range dataBlobs {
- v, err := os.ReadFile(s.filenames[blob])
- if err != nil {
- return errors.Wrapf(err, "copying non-layer blob %q to image", blob)
- }
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil {
- logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err)
- return errors.Wrapf(err, "saving big data %q for image %q", blob.String(), img.ID)
- }
- }
- // Save the unparsedToplevel's manifest if it differs from the per-platform one, which is saved below.
- if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
- manifestDigest, err := manifest.Digest(toplevelManifest)
- if err != nil {
- return errors.Wrapf(err, "digesting top-level manifest")
- }
- key := manifestBigDataKey(manifestDigest)
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil {
- logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "saving top-level manifest for image %q", img.ID)
- }
- }
- // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
- // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
- // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
- key := manifestBigDataKey(s.manifestDigest)
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
- logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "saving manifest for image %q", img.ID)
- }
- key = storage.ImageDigestBigDataKey
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
- logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "saving manifest for image %q", img.ID)
- }
- // Save the signatures, if we have any.
- if len(s.signatures) > 0 {
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
- logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "saving signatures for image %q", img.ID)
- }
- }
- for instanceDigest, signatures := range s.signatureses {
- key := signatureBigDataKey(instanceDigest)
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil {
- logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "saving signatures for image %q", img.ID)
- }
- }
- // Save our metadata.
- metadata, err := json.Marshal(s)
- if err != nil {
- logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "encoding metadata for image %q", img.ID)
- }
- if len(metadata) != 0 {
- if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
- logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "saving metadata for image %q", img.ID)
- }
- logrus.Debugf("saved image metadata %q", string(metadata))
- }
- // Add the reference's name to the image. We don't need to worry about avoiding duplicate
- // values because AddNames() will deduplicate the list that we pass to it.
- if name := s.imageRef.DockerReference(); name != nil {
- if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil {
- return errors.Wrapf(err, "adding names %v to image %q", name, img.ID)
- }
- logrus.Debugf("added name %q to image %q", name, img.ID)
- }
-
- commitSucceeded = true
- return nil
-}
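
CreateImage above is allowed to fail with storage.ErrDuplicateID: the image may already exist, in which case Commit() reuses the existing record only if its top layer matches, and otherwise reports a conflict. A reduced model of that create-or-reuse decision (store, image, and createOrReuse here are hypothetical stand-ins, not the c/storage API):

package main

import (
	"errors"
	"fmt"
)

var errDuplicateID = errors.New("duplicate ID")

type image struct {
	ID       string
	TopLayer string
}

type store struct {
	images map[string]image
}

func (s *store) create(id, topLayer string) (image, error) {
	if _, ok := s.images[id]; ok {
		return image{}, errDuplicateID
	}
	img := image{ID: id, TopLayer: topLayer}
	s.images[id] = img
	return img, nil
}

// createOrReuse mirrors the Commit() decision: on a duplicate ID, reuse
// the existing record only if it points at the same top layer.
func createOrReuse(s *store, id, topLayer string) (image, error) {
	img, err := s.create(id, topLayer)
	if err == nil {
		return img, nil
	}
	if !errors.Is(err, errDuplicateID) {
		return image{}, err
	}
	img = s.images[id]
	if img.TopLayer != topLayer {
		return image{}, fmt.Errorf("image %q exists with a different top layer: %w", id, errDuplicateID)
	}
	return img, nil
}

func main() {
	s := &store{images: map[string]image{}}
	first, _ := createOrReuse(s, "img1", "layerX")
	reused, err := createOrReuse(s, "img1", "layerX")
	_, conflict := createOrReuse(s, "img1", "layerY")
	fmt.Println(first.ID, reused.ID, err, conflict)
}
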
-
-var manifestMIMETypes = []string{
- imgspecv1.MediaTypeImageManifest,
- manifest.DockerV2Schema2MediaType,
- manifest.DockerV2Schema1SignedMediaType,
- manifest.DockerV2Schema1MediaType,
-}
-
-func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
- return manifestMIMETypes
-}
-
-// PutManifest writes the manifest to the destination.
-func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
- digest, err := manifest.Digest(manifestBlob)
- if err != nil {
- return err
- }
- newBlob := make([]byte, len(manifestBlob))
- copy(newBlob, manifestBlob)
- s.manifest = newBlob
- s.manifestDigest = digest
- return nil
-}
-
-// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was
-// previously supplied to PutSignatures().
-func (s *storageImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
-// uploaded to the image destination, true otherwise.
-func (s *storageImageDestination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (s *storageImageDestination) MustMatchRuntimeOS() bool {
- return true
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool {
- return true // Yes, we want the unmodified manifest
-}
-
-// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
-func (s *storageImageDestination) SupportsPutBlobPartial() bool {
- return true
-}
-
-// PutSignatures records the image's signatures for committing as a single data blob.
-func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- sizes := []int{}
- sigblob := []byte{}
- for _, sig := range signatures {
- sizes = append(sizes, len(sig))
- newblob := make([]byte, len(sigblob)+len(sig))
- copy(newblob, sigblob)
- copy(newblob[len(sigblob):], sig)
- sigblob = newblob
- }
- if instanceDigest == nil {
- s.signatures = sigblob
- s.SignatureSizes = sizes
- if len(s.manifest) > 0 {
- manifestDigest := s.manifestDigest
- instanceDigest = &manifestDigest
- }
- }
- if instanceDigest != nil {
- s.signatureses[*instanceDigest] = sigblob
- s.SignaturesSizes[*instanceDigest] = sizes
- }
- return nil
-}
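
PutSignatures and the source-side signature readers agree on a simple format: the individual signatures are concatenated into one big-data blob, and a parallel list of lengths (SignatureSizes/SignaturesSizes) records how to split it back apart. A self-contained round trip of that format (packSignatures and unpackSignatures are hypothetical names):

package main

import "fmt"

// packSignatures concatenates signatures into one blob and records the
// length of each, so the blob can later be split back into slices.
func packSignatures(sigs [][]byte) (blob []byte, sizes []int) {
	for _, sig := range sigs {
		sizes = append(sizes, len(sig))
		blob = append(blob, sig...)
	}
	return blob, sizes
}

// unpackSignatures is the inverse, with the same bounds checks as the
// reader above.
func unpackSignatures(blob []byte, sizes []int) ([][]byte, error) {
	out := [][]byte{}
	offset := 0
	for _, length := range sizes {
		if offset+length > len(blob) {
			return nil, fmt.Errorf("expected at least %d bytes, only found %d", offset+length, len(blob))
		}
		out = append(out, blob[offset:offset+length])
		offset += length
	}
	if offset != len(blob) {
		return nil, fmt.Errorf("signature blob contained %d extra bytes", len(blob)-offset)
	}
	return out, nil
}

func main() {
	blob, sizes := packSignatures([][]byte{[]byte("sig-a"), []byte("sig-b")})
	sigs, err := unpackSignatures(blob, sizes)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(sigs[0]), string(sigs[1]))
}
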
-
-// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
-// signatures, and the uncompressed sizes of all of the image's layers.
-func (s *storageImageSource) getSize() (int64, error) {
- var sum int64
- // Size up the data blobs.
- dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID)
- if err != nil {
- return -1, errors.Wrapf(err, "reading image %q", s.image.ID)
- }
- for _, dataName := range dataNames {
- bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName)
- if err != nil {
- return -1, errors.Wrapf(err, "reading data blob size %q for %q", dataName, s.image.ID)
- }
- sum += bigSize
- }
- // Add the signature sizes.
- for _, sigSize := range s.SignatureSizes {
- sum += int64(sigSize)
- }
- // Walk the layer list.
- layerID := s.image.TopLayer
- for layerID != "" {
- layer, err := s.imageRef.transport.store.Layer(layerID)
- if err != nil {
- return -1, err
- }
- if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
- return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
- }
- sum += layer.UncompressedSize
- if layer.Parent == "" {
- break
- }
- layerID = layer.Parent
- }
- return sum, nil
-}
-
-// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the
-// signatures, and the uncompressed sizes of all of the image's layers.
-func (s *storageImageSource) Size() (int64, error) {
- return s.getSize()
-}
-
// Size() returns the previously-computed size of the image, with no error.
func (s *storageImageCloser) Size() (int64, error) {
return s.size, nil
diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go
index 7c6da112c..dbb9804a6 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_reference.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go
@@ -5,6 +5,7 @@ package storage
import (
"context"
+ "fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
@@ -12,7 +13,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -31,11 +31,11 @@ func newReference(transport storageTransport, named reference.Named, id string)
return nil, ErrInvalidReference
}
if named != nil && reference.IsNameOnly(named) {
- return nil, errors.Wrapf(ErrInvalidReference, "reference %s has neither a tag nor a digest", named.String())
+ return nil, fmt.Errorf("reference %s has neither a tag nor a digest: %w", named.String(), ErrInvalidReference)
}
if id != "" {
if err := validateImageID(id); err != nil {
- return nil, errors.Wrapf(ErrInvalidReference, "invalid ID value %q: %v", id, err)
+ return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err, ErrInvalidReference)
}
}
// We take a copy of the transport, which contains a pointer to the
@@ -145,12 +145,12 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
}
if s.id == "" {
logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
- return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport())
+ return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage)
}
if loadedImage == nil {
img, err := s.transport.store.Image(s.id)
if err != nil {
- return nil, errors.Wrapf(err, "reading image %q", s.id)
+ return nil, fmt.Errorf("reading image %q: %w", s.id, err)
}
loadedImage = img
}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go
new file mode 100644
index 000000000..d4288dade
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/storage/storage_src.go
@@ -0,0 +1,380 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+type storageImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoGetBlobAtInitialize
+
+ imageRef storageReference
+ image *storage.Image
+ systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
+ layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
+ cachedManifest []byte // A cached copy of the manifest, if already known, or nil
+ getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each signature slice, keyed by manifest instance digest
+}
+
+// newImageSource sets up an image for reading.
+func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
+ // First, locate the image.
+ img, err := imageRef.resolveImage(sys)
+ if err != nil {
+ return nil, err
+ }
+
+ // Build the reader object.
+ image := &storageImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(imageRef),
+
+ imageRef: imageRef,
+ systemContext: sys,
+ image: img,
+ layerPosition: make(map[digest.Digest]int),
+ SignatureSizes: []int{},
+ SignaturesSizes: make(map[digest.Digest][]int),
+ }
+ image.Compat = impl.AddCompat(image)
+ if img.Metadata != "" {
+ if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
+ return nil, fmt.Errorf("decoding metadata for source image: %w", err)
+ }
+ }
+ return image, nil
+}
+
+// Reference returns the image reference that we used to find this image.
+func (s *storageImageSource) Reference() types.ImageReference {
+ return s.imageRef
+}
+
+// Close cleans up any resources we tied up while reading the image.
+func (s *storageImageSource) Close() error {
+ return nil
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
+ if info.Digest == image.GzippedEmptyLayerDigest {
+ return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+ }
+
+ // NOTE: the blob is first written to a temporary file and subsequently
+ // closed. The intention is to keep the time we own the storage lock
+ // as short as possible to allow other processes to access the storage.
+ rc, n, _, err = s.getBlobAndLayerID(info)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer rc.Close()
+
+ tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
+ if err != nil {
+ return nil, 0, err
+ }
+
+ if _, err := io.Copy(tmpFile, rc); err != nil {
+ return nil, 0, err
+ }
+
+ if _, err := tmpFile.Seek(0, 0); err != nil {
+ return nil, 0, err
+ }
+
+ wrapper := ioutils.NewReadCloserWrapper(tmpFile, func() error {
+ defer os.Remove(tmpFile.Name())
+ return tmpFile.Close()
+ })
+
+ return wrapper, n, err
+}
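
The copy-to-tempfile dance above exists so the storage lock taken by Diff() is released before the caller starts consuming the blob; the returned reader owns the temporary file and removes it on Close. A reduced sketch of that detach pattern (tempFileReader and detach are illustrative, standing in for the ioutils.NewReadCloserWrapper usage above):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// tempFileReader deletes its backing file once the caller is done reading.
type tempFileReader struct {
	*os.File
}

func (t tempFileReader) Close() error {
	defer os.Remove(t.File.Name())
	return t.File.Close()
}

// detach copies src into a temp file and returns a reader over the copy,
// so src (and any lock guarding it) can be released immediately.
func detach(src io.Reader) (io.ReadCloser, int64, error) {
	tmp, err := os.CreateTemp("", "blob-")
	if err != nil {
		return nil, -1, err
	}
	n, err := io.Copy(tmp, src)
	if err == nil {
		_, err = tmp.Seek(0, io.SeekStart)
	}
	if err != nil {
		tmp.Close()
		os.Remove(tmp.Name())
		return nil, -1, err
	}
	return tempFileReader{tmp}, n, nil
}

func main() {
	rc, n, err := detach(strings.NewReader("layer bytes"))
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	data, _ := io.ReadAll(rc)
	fmt.Println(n, string(data))
}
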
+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
+ var layer storage.Layer
+ var diffOptions *storage.DiffOptions
+ // We need a valid digest value.
+ err = info.Digest.Validate()
+ if err != nil {
+ return nil, -1, "", err
+ }
+ // Check if the blob corresponds to a diff that was used to initialize any layers. Our
+ // callers should try to retrieve layers using their uncompressed digests, so no need to
+ // check if they're using one of the compressed digests, which we can't reproduce anyway.
+ layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
+
+ // If it's not a layer, then it must be a data item.
+ if len(layers) == 0 {
+ b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
+ if err != nil {
+ return nil, -1, "", err
+ }
+ r := bytes.NewReader(b)
+ logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
+ return io.NopCloser(r), int64(r.Len()), "", nil
+ }
+ // Step through the list of matching layers. Tests may want to verify that if we have multiple layers
+ // which claim to have the same contents, that we actually do have multiple layers, otherwise we could
+ // just go ahead and use the first one every time.
+ s.getBlobMutex.Lock()
+ i := s.layerPosition[info.Digest]
+ s.layerPosition[info.Digest] = i + 1
+ s.getBlobMutex.Unlock()
+ if len(layers) > 0 {
+ layer = layers[i%len(layers)]
+ }
+ // Force the storage layer to not try to match any compression that was used when the layer was first
+ // handed to it.
+ noCompression := archive.Uncompressed
+ diffOptions = &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ if layer.UncompressedSize < 0 {
+ n = -1
+ } else {
+ n = layer.UncompressedSize
+ }
+ logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
+ rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
+ if err != nil {
+ return nil, -1, "", err
+ }
+ return rc, n, layer.ID, err
+}
+
+// GetManifest() reads the image's manifest.
+func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
+ if instanceDigest != nil {
+ key := manifestBigDataKey(*instanceDigest)
+ blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil {
+ return nil, "", fmt.Errorf("reading manifest for image instance %q: %w", *instanceDigest, err)
+ }
+ return blob, manifest.GuessMIMEType(blob), err
+ }
+ if len(s.cachedManifest) == 0 {
+ // The manifest is stored as a big data item.
+ // Prefer the manifest corresponding to the user-specified digest, if available.
+ if s.imageRef.named != nil {
+ if digested, ok := s.imageRef.named.(reference.Digested); ok {
+ key := manifestBigDataKey(digested.Digest())
+ blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
+ return nil, "", err
+ }
+ if err == nil {
+ s.cachedManifest = blob
+ }
+ }
+ }
+ // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
+ // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
+ if len(s.cachedManifest) == 0 {
+ cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
+ if err != nil {
+ return nil, "", err
+ }
+ s.cachedManifest = cachedBlob
+ }
+ }
+ return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
+}
+
+// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
+// the image, after they've been decompressed.
+func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest)
+ if err != nil {
+ return nil, fmt.Errorf("reading image manifest for %q: %w", s.image.ID, err)
+ }
+ if manifest.MIMETypeIsMultiImage(manifestType) {
+ return nil, errors.New("can't copy layers for a manifest list (shouldn't be attempted)")
+ }
+ man, err := manifest.FromBlob(manifestBlob, manifestType)
+ if err != nil {
+ return nil, fmt.Errorf("parsing image manifest for %q: %w", s.image.ID, err)
+ }
+
+ uncompressedLayerType := ""
+ switch manifestType {
+ case imgspecv1.MediaTypeImageManifest:
+ uncompressedLayerType = imgspecv1.MediaTypeImageLayer
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
+ uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+ }
+
+ physicalBlobInfos := []types.BlobInfo{}
+ layerID := s.image.TopLayer
+ for layerID != "" {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
+ }
+ if layer.UncompressedDigest == "" {
+ return nil, fmt.Errorf("uncompressed digest for layer %q is unknown", layerID)
+ }
+ if layer.UncompressedSize < 0 {
+ return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
+ }
+ blobInfo := types.BlobInfo{
+ Digest: layer.UncompressedDigest,
+ Size: layer.UncompressedSize,
+ MediaType: uncompressedLayerType,
+ }
+ physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
+ layerID = layer.Parent
+ }
+
+ res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
+ if err != nil {
+ return nil, fmt.Errorf("creating LayerInfosForCopy of image %q: %w", s.image.ID, err)
+ }
+ return res, nil
+}
+
+// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest,
+// but using layer data which we can actually produce — physicalInfos for non-empty layers,
+// and image.GzippedEmptyLayer for empty ones.
+// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.)
+func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
+ nextPhysical := 0
+ res := make([]types.BlobInfo, len(manifestInfos))
+ for i, mi := range manifestInfos {
+ if mi.EmptyLayer {
+ res[i] = types.BlobInfo{
+ Digest: image.GzippedEmptyLayerDigest,
+ Size: int64(len(image.GzippedEmptyLayer)),
+ MediaType: mi.MediaType,
+ }
+ } else {
+ if nextPhysical >= len(physicalInfos) {
+ return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
+ }
+ res[i] = physicalInfos[nextPhysical]
+ nextPhysical++
+ }
+ }
+ if nextPhysical != len(physicalInfos) {
+ return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
+ }
+ return res, nil
+}
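
As its comment says, buildLayerInfosForCopy is split out precisely so the interleaving of empty and physical layers can be unit-tested without a store. A dependency-free model of the same logic, small enough to check by hand (layerEntry and interleave are stand-ins, not c/image types):

package main

import "fmt"

// layerEntry mirrors the EmptyLayer flag on a manifest layer entry.
type layerEntry struct {
	Digest string
	Empty  bool
}

// interleave maps manifest entries to produced blobs: empty entries get a
// placeholder, non-empty entries consume the next physical layer in order.
func interleave(entries []layerEntry, physical []string) ([]string, error) {
	next := 0
	res := make([]string, len(entries))
	for i, e := range entries {
		if e.Empty {
			res[i] = "gzipped-empty-layer"
			continue
		}
		if next >= len(physical) {
			return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physical))
		}
		res[i] = physical[next]
		next++
	}
	if next != len(physical) {
		return nil, fmt.Errorf("used only %d out of %d physical layers", next, len(physical))
	}
	return res, nil
}

func main() {
	out, err := interleave(
		[]layerEntry{{Digest: "a"}, {Empty: true}, {Digest: "b"}},
		[]string{"phys-a", "phys-b"},
	)
	fmt.Println(out, err) // [phys-a gzipped-empty-layer phys-b] <nil>
}
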
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ var offset int
+ signatureBlobs := []byte{}
+ signatureSizes := s.SignatureSizes
+ key := "signatures"
+ instance := "default instance"
+ if instanceDigest != nil {
+ signatureSizes = s.SignaturesSizes[*instanceDigest]
+ key = signatureBigDataKey(*instanceDigest)
+ instance = instanceDigest.Encoded()
+ }
+ if len(signatureSizes) > 0 {
+ data, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil {
+ return nil, fmt.Errorf("looking up signatures data for image %q (%s): %w", s.image.ID, instance, err)
+ }
+ signatureBlobs = data
+ }
+ res := []signature.Signature{}
+ for _, length := range signatureSizes {
+ if offset+length > len(signatureBlobs) {
+ return nil, fmt.Errorf("looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, len(signatureBlobs), offset+length)
+ }
+ sig, err := signature.FromBlob(signatureBlobs[offset : offset+length])
+ if err != nil {
+ return nil, fmt.Errorf("parsing signature at (%d, %d): %w", offset, length, err)
+ }
+ res = append(res, sig)
+ offset += length
+ }
+ if offset != len(signatureBlobs) {
+ return nil, fmt.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signatureBlobs)-offset)
+ }
+ return res, nil
+}
+
+// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) getSize() (int64, error) {
+ var sum int64
+ // Size up the data blobs.
+ dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID)
+ if err != nil {
+ return -1, fmt.Errorf("reading image %q: %w", s.image.ID, err)
+ }
+ for _, dataName := range dataNames {
+ bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName)
+ if err != nil {
+ return -1, fmt.Errorf("reading data blob size %q for %q: %w", dataName, s.image.ID, err)
+ }
+ sum += bigSize
+ }
+ // Add the signature sizes.
+ for _, sigSize := range s.SignatureSizes {
+ sum += int64(sigSize)
+ }
+ // Walk the layer list.
+ layerID := s.image.TopLayer
+ for layerID != "" {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ return -1, err
+ }
+ if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
+ return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
+ }
+ sum += layer.UncompressedSize
+ if layer.Parent == "" {
+ break
+ }
+ layerID = layer.Parent
+ }
+ return sum, nil
+}
+
+// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) Size() (int64, error) {
+ return s.getSize()
+}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go
index 07393ee74..104e9d8cc 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go
@@ -4,6 +4,7 @@
package storage
import (
+ "errors"
"fmt"
"path/filepath"
"strings"
@@ -14,7 +15,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -117,13 +117,13 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
if ref == "" {
- return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref)
+ return nil, fmt.Errorf("%q is an empty reference: %w", ref, ErrInvalidReference)
}
if ref[0] == '[' {
// Ignore the store specifier.
closeIndex := strings.IndexRune(ref, ']')
if closeIndex < 1 {
- return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref)
+ return nil, fmt.Errorf("store specifier in %q did not end: %w", ref, ErrInvalidReference)
}
ref = ref[closeIndex+1:]
}
@@ -135,7 +135,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
if split != -1 {
possibleID := ref[split+1:]
if possibleID == "" {
- return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref)
+ return nil, fmt.Errorf("empty trailing digest or ID in %q: %w", ref, ErrInvalidReference)
}
// If it looks like a digest, leave it alone for now.
if _, err := digest.Parse(possibleID); err != nil {
@@ -147,7 +147,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
// so we might as well use the expanded value.
id = img.ID
} else {
- return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID)
+ return nil, fmt.Errorf("%q does not look like an image ID or digest: %w", possibleID, ErrInvalidReference)
}
// We have recognized an image ID; peel it off.
ref = ref[:split]
@@ -173,7 +173,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
var err error
named, err = reference.ParseNormalizedNamed(ref)
if err != nil {
- return nil, errors.Wrapf(err, "parsing named reference %q", ref)
+ return nil, fmt.Errorf("parsing named reference %q: %w", ref, err)
}
named = reference.TagNameOnly(named)
}
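
The rewrite in this hunk is the pattern applied throughout the commit: errors.Wrapf(sentinel, format, ...) from github.com/pkg/errors becomes fmt.Errorf with the message first and the sentinel wrapped via %w, so callers can still detect it with errors.Is. A minimal sketch (the local ErrInvalidReference stands in for the package's sentinel):

package main

import (
	"errors"
	"fmt"
)

var ErrInvalidReference = errors.New("invalid reference")

func parse(ref string) error {
	if ref == "" {
		// %w keeps the sentinel on the unwrap chain.
		return fmt.Errorf("%q is an empty reference: %w", ref, ErrInvalidReference)
	}
	return nil
}

func main() {
	err := parse("")
	fmt.Println(errors.Is(err, ErrInvalidReference)) // true
}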
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
index aedfdf5de..1dc4c3ad9 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
@@ -11,6 +11,8 @@ import (
"strings"
"time"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/types"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
@@ -19,6 +21,12 @@ import (
)
type tarballImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
reference tarballReference
filenames []string
diffIDs []digest.Digest
@@ -185,6 +193,11 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
// Return the image.
src := &tarballImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(r),
+
reference: *r,
filenames: filenames,
diffIDs: diffIDs,
@@ -197,6 +210,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
configSize: configSize,
manifest: manifestBytes,
}
+ src.Compat = impl.AddCompat(src)
return src, nil
}
@@ -205,11 +219,6 @@ func (is *tarballImageSource) Close() error {
return nil
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (is *tarballImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -246,28 +255,6 @@ func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *d
return is.manifest, imgspecv1.MediaTypeImageManifest, nil
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as there can be no secondary manifests.
-func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName)
- }
- return nil, nil
-}
-
func (is *tarballImageSource) Reference() types.ImageReference {
return &is.reference
}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (*tarballImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
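
Instead of hand-written boilerplate like the deleted GetSignatures and LayerInfosForCopy methods, the tarball source now embeds helper types from internal/imagesource (impl.Compat, impl.NoSignatures, stubs.NoGetBlobAtInitialize, ...), whose methods are promoted onto tarballImageSource by ordinary Go struct embedding. A reduced sketch of that mechanism with invented names (noSignatures is not the vendored helper):

package main

import "fmt"

// noSignatures supplies a default method that embedders inherit.
type noSignatures struct{}

func (noSignatures) GetSignatures() ([][]byte, error) { return nil, nil }

// imageSource picks up GetSignatures via method promotion.
type imageSource struct {
	noSignatures
	name string
}

func main() {
	src := imageSource{name: "example"}
	sigs, _ := src.GetSignatures()
	fmt.Println(src.name, len(sigs)) // example 0
}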
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
index 0bae8b259..62d767b58 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
@@ -1,6 +1,7 @@
package alltransports
import (
+ "fmt"
"strings"
// register all known transports
@@ -19,7 +20,6 @@ import (
// The storage transport is registered by storage*.go
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// ParseImageName converts a URL-like image name to a types.ImageReference.
@@ -27,11 +27,11 @@ func ParseImageName(imgName string) (types.ImageReference, error) {
// Keep this in sync with TransportFromImageName!
parts := strings.SplitN(imgName, ":", 2)
if len(parts) != 2 {
- return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
+ return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
}
transport := transports.Get(parts[0])
if transport == nil {
- return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
+ return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
}
return transport.ParseReference(parts[1])
}
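
ParseImageName's contract is unchanged by the error rewrite: the input must be colon-separated transport:reference, split on the first colon only. A rough sketch of that parsing shape (lookupTransport is a hypothetical stand-in for the transports registry):

package main

import (
	"fmt"
	"strings"
)

// lookupTransport is a hypothetical stand-in for transports.Get.
func lookupTransport(name string) bool {
	return name == "docker" || name == "containers-storage"
}

func parseImageName(imgName string) (transport, reference string, err error) {
	parts := strings.SplitN(imgName, ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
	}
	if !lookupTransport(parts[0]) {
		return "", "", fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
	}
	return parts[0], parts[1], nil
}

func main() {
	fmt.Println(parseImageName("docker://quay.io/podman/stable"))
}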
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index a8b20f03a..81c9894c5 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -1,6 +1,7 @@
package storage
import (
+ "errors"
"fmt"
"io/ioutil"
"os"
@@ -13,7 +14,6 @@ import (
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// A Container is a reference to a read-write layer with metadata.
@@ -329,8 +329,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
- return nil, errors.Wrapf(ErrDuplicateName,
- fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". You have to remove that container to be able to reuse that name.", name, r.byname[name].ID))
+ return nil, fmt.Errorf("the container name %q is already in use by %s. You have to remove that container to be able to reuse that name: %w", name, r.byname[name].ID, ErrDuplicateName)
}
}
if err := hasOverlappingRanges(options.UIDMap); err != nil {
@@ -479,7 +478,7 @@ func (r *containerStore) Exists(id string) bool {
func (r *containerStore) BigData(id, key string) ([]byte, error) {
if key == "" {
- return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name")
+ return nil, fmt.Errorf("can't retrieve container big data value for empty name: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
@@ -490,7 +489,7 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) {
func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
- return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name")
+ return -1, fmt.Errorf("can't retrieve size of container big data with empty name: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
@@ -520,7 +519,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
- return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name")
+ return "", fmt.Errorf("can't retrieve digest of container big data value with empty name: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
@@ -558,7 +557,7 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) {
func (r *containerStore) SetBigData(id, key string, data []byte) error {
if key == "" {
- return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item")
+ return fmt.Errorf("can't set empty name for container big data item: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index e66613c09..dd5685aca 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -25,6 +25,7 @@ package aufs
import (
"bufio"
+ "errors"
"fmt"
"io"
"io/fs"
@@ -48,7 +49,6 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/runc/libcontainer/userns"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
"golang.org/x/sys/unix"
@@ -56,9 +56,9 @@ import (
var (
// ErrAufsNotSupported is returned if aufs is not supported by the host.
- ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems")
+ ErrAufsNotSupported = fmt.Errorf("aufs was not found in /proc/filesystems")
// ErrAufsNested means aufs cannot be used because we are in a user namespace
- ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace")
+ ErrAufsNested = fmt.Errorf("aufs cannot be used in non-init user namespace")
backingFs = "<unknown>"
enableDirpermLock sync.Once
@@ -91,7 +91,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Try to load the aufs kernel module
if err := supportsAufs(); err != nil {
- return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs")
+ return nil, fmt.Errorf("kernel does not support aufs: %w", graphdriver.ErrNotSupported)
}
@@ -106,7 +106,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs:
logrus.Errorf("AUFS is not supported over %s", backingFs)
- return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs)
+ return nil, fmt.Errorf("aufs is not supported over %q: %w", backingFs, graphdriver.ErrIncompatibleFS)
}
var mountOptions string
@@ -374,10 +374,10 @@ func (a *Driver) Remove(id string) error {
}
if err != unix.EBUSY {
- return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint)
+ return fmt.Errorf("aufs: unmount error: %s: %w", mountpoint, err)
}
if retries >= 5 {
- return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint)
+ return fmt.Errorf("aufs: unmount error after retries: %s: %w", mountpoint, err)
}
// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
retries++
@@ -387,21 +387,21 @@ func (a *Driver) Remove(id string) error {
// Remove the layers file for the id
if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing layers dir for %s", id)
+ return fmt.Errorf("removing layers dir for %s: %w", id, err)
}
if err := atomicRemove(a.getDiffPath(id)); err != nil {
- return errors.Wrapf(err, "could not remove diff path for id %s", id)
+ return fmt.Errorf("could not remove diff path for id %s: %w", id, err)
}
// Atomically remove each directory in turn by first moving it out of the
// way (so that container runtime doesn't find it anymore) before doing removal of
// the whole tree.
if err := atomicRemove(mountpoint); err != nil {
- if errors.Cause(err) == unix.EBUSY {
+ if errors.Is(err, unix.EBUSY) {
logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY")
}
- return errors.Wrapf(err, "could not remove mountpoint for id %s", id)
+ return fmt.Errorf("could not remove mountpoint for id %s: %w", id, err)
}
a.pathCacheLock.Lock()
@@ -419,10 +419,10 @@ func atomicRemove(source string) error {
case os.IsExist(err):
// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
if _, e := os.Stat(source); !os.IsNotExist(e) {
- return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target)
+ return fmt.Errorf("target rename dir '%s' exists but should not, this needs to be manually cleaned up: %w", target, err)
}
default:
- return errors.Wrapf(err, "error preparing atomic delete")
+ return fmt.Errorf("preparing atomic delete: %w", err)
}
return system.EnsureRemoveAll(target)
@@ -626,7 +626,7 @@ func (a *Driver) mount(id string, target string, layers []string, options graphd
rw := a.getDiffPath(id)
if err := a.aufsMount(layers, rw, target, options); err != nil {
- return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
+ return fmt.Errorf("creating aufs mount to %s: %w", target, err)
}
return nil
}
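
Remove() above treats EBUSY from unmount as potentially transient and retries a few times before giving up and wrapping the error. A compact sketch of that retry shape, with a stubbed unmount standing in for unix.Unmount:

package main

import (
	"errors"
	"fmt"
	"syscall"
	"time"
)

// unmount is a stand-in that always reports EBUSY for the sketch.
func unmount(target string) error { return syscall.EBUSY }

func removeWithRetries(mountpoint string) error {
	for retries := 0; ; retries++ {
		err := unmount(mountpoint)
		if err == nil {
			return nil
		}
		if !errors.Is(err, syscall.EBUSY) {
			return fmt.Errorf("aufs: unmount error: %s: %w", mountpoint, err)
		}
		if retries >= 5 {
			return fmt.Errorf("aufs: unmount error after retries: %s: %w", mountpoint, err)
		}
		// EBUSY may be transient; back off briefly and retry.
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	fmt.Println(removeWithRetries("/mnt/example"))
}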
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index 339aa0d38..be44390da 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -36,7 +36,6 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -62,7 +61,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
if fsMagic != graphdriver.FsMagicBtrfs {
- return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home)
+ return nil, fmt.Errorf("%q is not on a btrfs filesystem: %w", home, graphdriver.ErrPrerequisites)
}
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
@@ -118,7 +117,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
case "btrfs.mountopt":
return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options")
default:
- return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
+ return options, userDiskQuota, fmt.Errorf("unknown option %s", key)
}
}
return options, userDiskQuota, nil
@@ -174,7 +173,7 @@ func openDir(path string) (*C.DIR, error) {
dir := C.opendir(Cpath)
if dir == nil {
- return nil, fmt.Errorf("Can't open dir")
+ return nil, fmt.Errorf("can't open dir %s", path)
}
return dir, nil
}
@@ -204,7 +203,7 @@ func subvolCreate(path, name string) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error())
+ return fmt.Errorf("failed to create btrfs subvolume: %w", errno)
}
return nil
}
@@ -232,7 +231,7 @@ func subvolSnapshot(src, dest, name string) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error())
+ return fmt.Errorf("failed to create btrfs snapshot: %w", errno)
}
return nil
}
@@ -265,25 +264,25 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error {
// since it's gone anyway, we don't care
return nil
}
- return fmt.Errorf("error walking subvolumes: %v", err)
+ return fmt.Errorf("walking subvolumes: %w", err)
}
// we want to check children only so skip itself
// it will be removed after the filepath walk anyways
if d.IsDir() && p != fullPath {
sv, err := isSubvolume(p)
if err != nil {
- return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err)
+ return fmt.Errorf("failed to test if %s is a btrfs subvolume: %w", p, err)
}
if sv {
if err := subvolDelete(path.Dir(p), d.Name(), quotaEnabled); err != nil {
- return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err)
+ return fmt.Errorf("failed to destroy btrfs child subvolume (%s) of parent (%s): %w", p, dirpath, err)
}
}
}
return nil
}
if err := filepath.WalkDir(path.Join(dirpath, name), walkSubvolumes); err != nil {
- return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err)
+ return fmt.Errorf("recursively walking subvolumes for %s failed: %w", dirpath, err)
}
if quotaEnabled {
@@ -309,7 +308,7 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error())
+ return fmt.Errorf("failed to destroy btrfs snapshot %s for %s: %w", dirpath, name, errno)
}
return nil
}
@@ -345,7 +344,7 @@ func (d *Driver) enableQuota() error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error())
+ return fmt.Errorf("failed to enable btrfs quota for %s: %w", dir, errno)
}
d.quotaEnabled = true
@@ -370,7 +369,7 @@ func (d *Driver) subvolRescanQuota() error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error())
+ return fmt.Errorf("failed to rescan btrfs quota for %s: %w", dir, errno)
}
return nil
@@ -389,7 +388,7 @@ func subvolLimitQgroup(path string, size uint64) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error())
+ return fmt.Errorf("failed to limit qgroup for %s: %w", dir, errno)
}
return nil
@@ -418,11 +417,11 @@ func qgroupStatus(path string) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error())
+ return fmt.Errorf("failed to search qgroup for %s: %w", path, errno)
}
sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf))
if sh._type != C.BTRFS_QGROUP_STATUS_KEY {
- return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type)
+ return fmt.Errorf("invalid qgroup search header type for %s: %v", path, sh._type)
}
return nil
}
@@ -440,10 +439,10 @@ func subvolLookupQgroup(path string) (uint64, error) {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error())
+ return 0, fmt.Errorf("failed to lookup qgroup for %s: %w", dir, errno)
}
if args.treeid == 0 {
- return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir)
+ return 0, fmt.Errorf("invalid qgroup id for %s: 0", dir)
}
return uint64(args.treeid), nil
@@ -559,7 +558,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
}
driver.options.size = uint64(size)
default:
- return fmt.Errorf("Unknown option %s", key)
+ return fmt.Errorf("unknown option %s", key)
}
}
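
Several of the btrfs hunks switch from formatting errno.Error() with %v to wrapping the errno itself with %w. Because syscall.Errno implements error, wrapping it keeps the concrete value available to errors.Is. A small sketch:

package main

import (
	"errors"
	"fmt"
	"syscall"
)

func doIoctl() error {
	errno := syscall.ENOTTY // pretend the ioctl failed
	if errno != 0 {
		// %w preserves the Errno; formatting errno.Error() would not.
		return fmt.Errorf("failed to create btrfs subvolume: %w", errno)
	}
	return nil
}

func main() {
	err := doIoctl()
	fmt.Println(errors.Is(err, syscall.ENOTTY)) // true
}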
diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go
index 2db6764c9..bad654b59 100644
--- a/vendor/github.com/containers/storage/drivers/chown.go
+++ b/vendor/github.com/containers/storage/drivers/chown.go
@@ -87,13 +87,13 @@ func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error
cmd.Stdin = bytes.NewReader(config)
output, err := cmd.CombinedOutput()
if len(output) > 0 && err != nil {
- return fmt.Errorf("%v: %s", err, string(output))
+ return fmt.Errorf("%s: %w", string(output), err)
}
if err != nil {
return err
}
if len(output) > 0 {
- return fmt.Errorf("%s", string(output))
+ return fmt.Errorf(string(output))
}
return nil
diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go
index cf608d479..a732075fb 100644
--- a/vendor/github.com/containers/storage/drivers/chown_darwin.go
+++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go
@@ -65,7 +65,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
mappedUID, mappedGID, err := toContainer.ToContainer(pair)
if err != nil {
if (uid != 0) || (gid != 0) {
- return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err)
+ return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err)
}
mappedUID, mappedGID = uid, gid
}
@@ -78,29 +78,29 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
}
mappedPair, err := toHost.ToHostOverflow(pair)
if err != nil {
- return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
+ return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err)
}
uid, gid = mappedPair.UID, mappedPair.GID
}
if uid != int(st.Uid) || gid != int(st.Gid) {
cap, err := system.Lgetxattr(path, "security.capability")
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
// Make the change.
if err := system.Lchown(path, uid, gid); err != nil {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
// Restore the SUID and SGID bits if they were originally set.
if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
if err := system.Chmod(path, info.Mode()); err != nil {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
}
if cap != nil {
if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
}
diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go
index 65756d8ec..42c12c627 100644
--- a/vendor/github.com/containers/storage/drivers/chown_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chown_unix.go
@@ -83,7 +83,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
mappedUID, mappedGID, err := toContainer.ToContainer(pair)
if err != nil {
if (uid != 0) || (gid != 0) {
- return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err)
+ return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err)
}
mappedUID, mappedGID = uid, gid
}
@@ -96,29 +96,29 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
}
mappedPair, err := toHost.ToHostOverflow(pair)
if err != nil {
- return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
+ return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err)
}
uid, gid = mappedPair.UID, mappedPair.GID
}
if uid != int(st.Uid) || gid != int(st.Gid) {
cap, err := system.Lgetxattr(path, "security.capability")
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
// Make the change.
if err := system.Lchown(path, uid, gid); err != nil {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
// Restore the SUID and SGID bits if they were originally set.
if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
if err := system.Chmod(path, info.Mode()); err != nil {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
}
if cap != nil {
if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
}
diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go
index c8c4905bf..9a1c6751f 100644
--- a/vendor/github.com/containers/storage/drivers/chroot_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go
@@ -1,3 +1,4 @@
+//go:build linux || darwin || freebsd || solaris
// +build linux darwin freebsd solaris
package graphdriver
@@ -12,10 +13,10 @@ import (
// specified path followed by chdir() to the new root directory
func chrootOrChdir(path string) error {
if err := syscall.Chroot(path); err != nil {
- return fmt.Errorf("error chrooting to %q: %v", path, err)
+ return fmt.Errorf("chrooting to %q: %w", path, err)
}
if err := syscall.Chdir(string(os.PathSeparator)); err != nil {
- return fmt.Errorf("error changing to %q: %v", path, err)
+ return fmt.Errorf("changing to %q: %w", path, err)
}
return nil
}
diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go
index f4dc22a96..d5eb5ed98 100644
--- a/vendor/github.com/containers/storage/drivers/chroot_windows.go
+++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go
@@ -9,7 +9,7 @@ import (
// specified path followed by chdir() to the new root directory
func chrootOrChdir(path string) error {
if err := syscall.Chdir(path); err != nil {
- return fmt.Errorf("error changing to %q: %v", path, err)
+ return fmt.Errorf("changing to %q: %w", path, err)
}
return nil
}
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
index 7773844f9..b92b3b12d 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -1,3 +1,4 @@
+//go:build cgo
// +build cgo
package copy
@@ -154,7 +155,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
dstPath := filepath.Join(dstDir, relPath)
stat, ok := f.Sys().(*syscall.Stat_t)
if !ok {
- return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+ return fmt.Errorf("unable to get raw syscall.Stat_t data for %s", srcPath)
}
isHardlink := false
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
index c23097b76..d239be87b 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
@@ -1,3 +1,4 @@
+//go:build linux && cgo
// +build linux,cgo
package devmapper
@@ -5,6 +6,7 @@ package devmapper
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io/ioutil"
"os"
@@ -12,7 +14,6 @@ import (
"path/filepath"
"strings"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -59,7 +60,7 @@ func checkDevAvailable(dev string) error {
}
if !bytes.Contains(out, []byte(dev)) {
- return errors.Errorf("%s is not available for use with devicemapper", dev)
+ return fmt.Errorf("%s is not available for use with devicemapper", dev)
}
return nil
}
@@ -84,7 +85,7 @@ func checkDevInVG(dev string) error {
// got "VG Name" line"
vg := strings.TrimSpace(fields[1])
if len(vg) > 0 {
- return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg)
+ return fmt.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg)
}
logrus.Error(fields)
break
@@ -112,7 +113,7 @@ func checkDevHasFS(dev string) error {
if bytes.Equal(kv[0], []byte("TYPE")) {
v := bytes.Trim(kv[1], "\"")
if len(v) > 0 {
- return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev)
+ return fmt.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev)
}
return nil
}
@@ -123,16 +124,16 @@ func checkDevHasFS(dev string) error {
func verifyBlockDevice(dev string, force bool) error {
absPath, err := filepath.Abs(dev)
if err != nil {
- return errors.Errorf("unable to get absolute path for %s: %s", dev, err)
+ return fmt.Errorf("unable to get absolute path for %s: %s", dev, err)
}
realPath, err := filepath.EvalSymlinks(absPath)
if err != nil {
- return errors.Errorf("failed to canonicalise path for %s: %s", dev, err)
+ return fmt.Errorf("failed to canonicalise path for %s: %s", dev, err)
}
if err := checkDevAvailable(absPath); err != nil {
logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath)
if err := checkDevAvailable(realPath); err != nil {
- return errors.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device.", absPath, realPath)
+ return fmt.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device", absPath, realPath)
}
}
if err := checkDevInVG(realPath); err != nil {
@@ -158,7 +159,7 @@ func readLVMConfig(root string) (directLVMConfig, error) {
if os.IsNotExist(err) {
return cfg, nil
}
- return cfg, errors.Wrap(err, "error reading existing setup config")
+ return cfg, fmt.Errorf("reading existing setup config: %w", err)
}
// check if this is just an empty file, no need to produce a json error later if so
@@ -167,17 +168,17 @@ func readLVMConfig(root string) (directLVMConfig, error) {
}
err = json.Unmarshal(b, &cfg)
- return cfg, errors.Wrap(err, "error unmarshaling previous device setup config")
+ return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err)
}
func writeLVMConfig(root string, cfg directLVMConfig) error {
p := filepath.Join(root, "setup-config.json")
b, err := json.Marshal(cfg)
if err != nil {
- return errors.Wrap(err, "error marshalling direct lvm config")
+ return fmt.Errorf("marshalling direct lvm config: %w", err)
}
err = ioutil.WriteFile(p, b, 0600)
- return errors.Wrap(err, "error writing direct lvm config to file")
+ return fmt.Errorf("writing direct lvm config to file: %w", err)
}
func setupDirectLVM(cfg directLVMConfig) error {
@@ -186,13 +187,13 @@ func setupDirectLVM(cfg directLVMConfig) error {
for _, bin := range binaries {
if _, err := exec.LookPath(bin); err != nil {
- return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm")
+ return fmt.Errorf("looking up command `"+bin+"` while setting up direct lvm: %w", err)
}
}
err := os.MkdirAll(lvmProfileDir, 0755)
if err != nil {
- return errors.Wrap(err, "error creating lvm profile directory")
+ return fmt.Errorf("creating lvm profile directory: %w", err)
}
if cfg.AutoExtendPercent == 0 {
@@ -215,34 +216,37 @@ func setupDirectLVM(cfg directLVMConfig) error {
out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput()
if err != nil {
- return errors.Wrap(err, string(out))
+ return fmt.Errorf("%v: %w", string(out), err)
}
out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput()
if err != nil {
- return errors.Wrap(err, string(out))
+ return fmt.Errorf("%v: %w", string(out), err)
}
out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput()
if err != nil {
- return errors.Wrap(err, string(out))
+ return fmt.Errorf("%v: %w", string(out), err)
}
out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput()
if err != nil {
- return errors.Wrap(err, string(out))
+ return fmt.Errorf("%v: %w", string(out), err)
}
out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput()
if err != nil {
- return errors.Wrap(err, string(out))
+ return fmt.Errorf("%v: %w", string(out), err)
}
profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
if err != nil {
- return errors.Wrap(err, "error writing storage thinp autoextend profile")
+ return fmt.Errorf("writing storage thinp autoextend profile: %w", err)
}
out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput()
- return errors.Wrap(err, string(out))
+ if err != nil {
+ return fmt.Errorf("%s: %w", string(out), err)
+ }
+ return nil
}
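
The lvchange hunk just above is more than a mechanical rewrite: errors.Wrap returns nil when the wrapped error is nil, while fmt.Errorf with %w always returns a non-nil error, so the conversion has to add an explicit err != nil guard to keep the success path returning nil. A sketch of the shape that guard preserves:

package main

import (
	"errors"
	"fmt"
)

// wrapIfErr mirrors the post-conversion code: wrap only on failure.
func wrapIfErr(out string, err error) error {
	if err == nil {
		return nil // an unconditional fmt.Errorf would be non-nil here
	}
	return fmt.Errorf("%s: %w", out, err)
}

func main() {
	fmt.Println(wrapIfErr("command output", nil))             // <nil>
	fmt.Println(wrapIfErr("command output", errors.New("x"))) // command output: x
}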
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
index e604b7e31..6989a4381 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -5,6 +5,7 @@ package devmapper
import (
"bufio"
+ "errors"
"fmt"
"io"
"io/fs"
@@ -29,7 +30,6 @@ import (
"github.com/containers/storage/pkg/parsers/kernel"
units "github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -300,7 +300,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
}
defer file.Close()
if err := file.Truncate(size); err != nil {
- return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err)
+ return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %w", filename, err)
}
} else if fi.Size() > size {
logrus.Warnf("devmapper: Can't shrink loopback file %s", filename)
@@ -449,7 +449,7 @@ func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error
// Include deleted devices also as cleanup delete device logic
// will go through it and see if there are any deleted devices.
if _, err := devices.lookupDevice(name); err != nil {
- return fmt.Errorf("devmapper: Error looking up device %s:%v", name, err)
+ return fmt.Errorf("devmapper: Error looking up device %s:%w", name, err)
}
return nil
@@ -548,7 +548,7 @@ func xfsSupported() error {
f, err := os.Open("/proc/filesystems")
if err != nil {
- return errors.Wrapf(err, "error checking for xfs support")
+ return fmt.Errorf("checking for xfs support: %w", err)
}
defer f.Close()
@@ -560,7 +560,7 @@ func xfsSupported() error {
}
if err := s.Err(); err != nil {
- return errors.Wrapf(err, "error checking for xfs support")
+ return fmt.Errorf("checking for xfs support: %w", err)
}
return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`)
@@ -731,7 +731,7 @@ func (devices *DeviceSet) initMetaData() error {
devices.TransactionID = transactionID
if err := devices.loadDeviceFilesOnStart(); err != nil {
- return fmt.Errorf("devmapper: Failed to load device files:%v", err)
+ return fmt.Errorf("devmapper: Failed to load device files:%w", err)
}
devices.constructDeviceIDMap()
@@ -870,7 +870,7 @@ func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint
err = devices.cancelDeferredRemoval(baseInfo)
if err != nil {
// If Error is ErrEnxio. Device is probably already gone. Continue.
- if errors.Cause(err) != devicemapper.ErrEnxio {
+ if !errors.Is(err, devicemapper.ErrEnxio) {
return err
}
devinfo = nil
@@ -977,7 +977,7 @@ func (devices *DeviceSet) loadMetadata(hash string) *devInfo {
func getDeviceUUID(device string) (string, error) {
out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output()
if err != nil {
- return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err)
+ return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%w", device, err)
}
uuid := strings.TrimSuffix(string(out), "\n")
@@ -1085,7 +1085,7 @@ func (devices *DeviceSet) createBaseImage() error {
}
if err := devices.saveBaseDeviceUUID(info); err != nil {
- return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
+ return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err)
}
return nil
@@ -1098,7 +1098,7 @@ func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) {
info, err := devicemapper.GetInfo(thinPoolDevice)
if err != nil {
- return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err)
+ return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %w", thinPoolDevice, err)
}
// Device does not exist.
@@ -1108,7 +1108,7 @@ func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) {
_, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice)
if err != nil {
- return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err)
+ return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %w", thinPoolDevice, err)
}
if deviceType != "thin-pool" {
@@ -1140,13 +1140,13 @@ func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error {
// If BaseDeviceUUID is nil (upgrade case), save it and return success.
if devices.BaseDeviceUUID == "" {
if err := devices.saveBaseDeviceUUID(baseInfo); err != nil {
- return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
+ return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err)
}
return nil
}
if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil {
- return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err)
+ return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %w", err)
}
return nil
@@ -1185,7 +1185,7 @@ func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error {
func (devices *DeviceSet) growFS(info *devInfo) error {
if err := devices.activateDeviceIfNeeded(info, false); err != nil {
- return fmt.Errorf("Error activating devmapper device: %s", err)
+ return fmt.Errorf("activating devmapper device: %s", err)
}
defer devices.deactivateDevice(info)
@@ -1206,7 +1206,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
options = joinMountOptions(options, devices.mountOptions)
if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
- return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256)))
+ return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err)
}
defer func() {
@@ -1218,14 +1218,14 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
switch devices.BaseDeviceFilesystem {
case ext4:
if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil {
- return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
+ return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err)
}
case xfs:
if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil {
- return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
+ return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err)
}
default:
- return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem)
+ return fmt.Errorf("unsupported filesystem type %s", devices.BaseDeviceFilesystem)
}
return nil
}
@@ -1348,7 +1348,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
// Reload size for loopback device
if err := loopback.SetCapacity(dataloopback); err != nil {
- return fmt.Errorf("Unable to update loopback capacity: %s", err)
+ return fmt.Errorf("unable to update loopback capacity: %s", err)
}
// Suspend the pool
@@ -1507,7 +1507,7 @@ func determineDriverCapabilities(version string) error {
versionSplit := strings.Split(version, ".")
major, err := strconv.Atoi(versionSplit[0])
if err != nil {
- return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver major version %q as a number", versionSplit[0])
+ return fmt.Errorf("unable to parse driver major version %q as a number: %w", versionSplit[0], graphdriver.ErrNotSupported)
}
if major > 4 {
@@ -1521,7 +1521,7 @@ func determineDriverCapabilities(version string) error {
minor, err := strconv.Atoi(versionSplit[1])
if err != nil {
- return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver minor version %q as a number", versionSplit[1])
+ return fmt.Errorf("unable to parse driver minor version %q as a number: %w", versionSplit[1], graphdriver.ErrNotSupported)
}
/*
@@ -1783,7 +1783,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
}
switch fsMagic {
case graphdriver.FsMagicAufs:
- return errors.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems")
+ return fmt.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems")
}
if devices.dataDevice == "" {
@@ -1979,7 +1979,7 @@ func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64,
}
return uint64(size), nil
default:
- return 0, fmt.Errorf("Unknown option %s", key)
+ return 0, fmt.Errorf("unknown option %s", key)
}
}
@@ -2013,7 +2013,7 @@ func (devices *DeviceSet) deleteDeviceNoLock(info *devInfo, syncDelete bool) err
// If syncDelete is true, we want to return error. If deferred
// deletion is not enabled, we return an error. If error is
// something other then EBUSY, return an error.
- if syncDelete || !devices.deferredDelete || errors.Cause(err) != devicemapper.ErrBusy {
+ if syncDelete || !devices.deferredDelete || !errors.Is(err, devicemapper.ErrBusy) {
logrus.Debugf("devmapper: Error deleting device: %s", err)
return err
}
@@ -2174,7 +2174,7 @@ func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove boo
// This function's semantics is such that it does not return an
// error if device does not exist. So if device went away by
// the time we actually tried to remove it, do not return error.
- if errors.Cause(err) != devicemapper.ErrEnxio {
+ if !errors.Is(err, devicemapper.ErrEnxio) {
return err
}
return nil
@@ -2192,7 +2192,7 @@ func (devices *DeviceSet) removeDevice(devname string) error {
if err == nil {
break
}
- if errors.Cause(err) != devicemapper.ErrBusy {
+ if !errors.Is(err, devicemapper.ErrBusy) {
return err
}
@@ -2226,7 +2226,7 @@ func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
// Cancel deferred remove
if err := devices.cancelDeferredRemoval(info); err != nil {
// If Error is ErrEnxio. Device is probably already gone. Continue.
- if errors.Cause(err) != devicemapper.ErrEnxio {
+ if !errors.Is(err, devicemapper.ErrEnxio) {
return err
}
}
@@ -2243,7 +2243,7 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
for i := 0; i < 100; i++ {
err = devicemapper.CancelDeferredRemove(info.Name())
if err != nil {
- if errors.Cause(err) != devicemapper.ErrBusy {
+ if !errors.Is(err, devicemapper.ErrBusy) {
// If we see EBUSY it may be a transient error,
// sleep a bit a retry a few times.
devices.Unlock()
@@ -2344,21 +2344,21 @@ func (devices *DeviceSet) Shutdown(home string) error {
func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
dmDevicePath, err := os.Readlink(info.DevName())
if err != nil {
- return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err)
+ return fmt.Errorf("devmapper: readlink failed for device %v:%w", info.DevName(), err)
}
dmDeviceName := path.Base(dmDevicePath)
filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
if err != nil {
- return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err)
+ return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%w", err)
}
defer maxRetriesFile.Close()
// Set max retries to 0
_, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
if err != nil {
- return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err)
+ return fmt.Errorf("devmapper: Failed to write string %v to file %v:%w", devices.xfsNospaceRetries, filePath, err)
}
return nil
}
@@ -2409,7 +2409,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.Mo
options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel))
if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
- return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256)))
+ return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err)
}
if fstype == xfs && devices.xfsNospaceRetries != "" {
@@ -2790,7 +2790,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
case "dm.thinp_percent":
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val)
+ return nil, fmt.Errorf("could not parse `dm.thinp_percent=%s`: %w", val, err)
}
if per >= 100 {
return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100")
@@ -2799,7 +2799,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
case "dm.thinp_metapercent":
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val)
+ return nil, fmt.Errorf("could not parse `dm.thinp_metapercent=%s`: %w", val, err)
}
if per >= 100 {
return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100")
@@ -2808,7 +2808,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
case "dm.thinp_autoextend_percent":
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val)
+ return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_percent=%s`: %w", val, err)
}
if per > 100 {
return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100")
@@ -2817,7 +2817,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
case "dm.thinp_autoextend_threshold":
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val)
+ return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_threshold=%s`: %w", val, err)
}
if per > 100 {
return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100")
@@ -2826,10 +2826,10 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
case "dm.libdm_log_level":
level, err := strconv.ParseInt(val, 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val)
+ return nil, fmt.Errorf("could not parse `dm.libdm_log_level=%s`: %w", val, err)
}
if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug {
- return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug)
+ return nil, fmt.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug)
}
// Register a new logging callback with the specified level.
devicemapper.LogInit(devicemapper.DefaultLogger{
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index 770b431bd..d4f92e682 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -1,13 +1,13 @@
package graphdriver
import (
+ "errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
@@ -282,7 +282,7 @@ func init() {
// Register registers an InitFunc for the driver.
func Register(name string, initFunc InitFunc) error {
if _, exists := drivers[name]; exists {
- return fmt.Errorf("Name already registered %s", name)
+ return fmt.Errorf("name already registered %s", name)
}
drivers[name] = initFunc
@@ -296,7 +296,7 @@ func GetDriver(name string, config Options) (Driver, error) {
}
logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root)
- return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root)
+ return nil, fmt.Errorf("failed to GetDriver graph %s %s: %w", name, config.Root, ErrNotSupported)
}
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
@@ -305,7 +305,7 @@ func getBuiltinDriver(name, home string, options Options) (Driver, error) {
return initFunc(filepath.Join(home, name), options)
}
logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
- return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home)
+ return nil, fmt.Errorf("failed to built-in GetDriver graph %s %s: %w", name, home, ErrNotSupported)
}
// Options is used to initialize a graphdriver
@@ -384,14 +384,13 @@ func New(name string, config Options) (Driver, error) {
}
return driver, nil
}
- return nil, fmt.Errorf("No supported storage backend found")
+ return nil, fmt.Errorf("no supported storage backend found")
}
// isDriverNotSupported returns true if the error initializing
// the graph driver is a non-supported error.
func isDriverNotSupported(err error) bool {
- cause := errors.Cause(err)
- return cause == ErrNotSupported || cause == ErrPrerequisites || cause == ErrIncompatibleFS
+ return errors.Is(err, ErrNotSupported) || errors.Is(err, ErrPrerequisites) || errors.Is(err, ErrIncompatibleFS)
}
// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
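
isDriverNotSupported() now relies on errors.Is rather than comparing errors.Cause(err) against each sentinel; errors.Is walks the whole %w chain, so a sentinel still matches after being wrapped more than once. A sketch (the local ErrNotSupported stands in for the graphdriver sentinel):

package main

import (
	"errors"
	"fmt"
)

var ErrNotSupported = errors.New("driver not supported")

func main() {
	err := fmt.Errorf("init: %w", fmt.Errorf("backing fs check: %w", ErrNotSupported))
	fmt.Println(errors.Is(err, ErrNotSupported)) // true: follows both wraps
	fmt.Println(err == ErrNotSupported)          // false: equality sees only the outer error
}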
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go
index 48fb7a550..c43ab4c1e 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/check.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/check.go
@@ -4,6 +4,7 @@
package overlay
import (
+ "errors"
"fmt"
"io/ioutil"
"os"
@@ -17,7 +18,6 @@ import (
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -59,7 +59,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
// Mark l2/d as opaque
if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), archive.GetOverlayXattrName("opaque"), []byte("y"), 0); err != nil {
- return errors.Wrap(err, "failed to set opaque flag on middle layer")
+ return fmt.Errorf("failed to set opaque flag on middle layer: %w", err)
}
mountFlags := "lowerdir=%s:%s,upperdir=%s,workdir=%s"
@@ -73,7 +73,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
- return errors.Wrap(err, "failed to mount overlay")
+ return fmt.Errorf("failed to mount overlay: %w", err)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
@@ -83,13 +83,13 @@ func doesSupportNativeDiff(d, mountOpts string) error {
// Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
- return errors.Wrap(err, "failed to write to merged directory")
+ return fmt.Errorf("failed to write to merged directory: %w", err)
}
// Check l3/d does not have opaque flag
xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), archive.GetOverlayXattrName("opaque"))
if err != nil {
- return errors.Wrap(err, "failed to read opaque flag on upper layer")
+ return fmt.Errorf("failed to read opaque flag on upper layer: %w", err)
}
if string(xattrOpaque) == "y" {
return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix")
@@ -101,12 +101,12 @@ func doesSupportNativeDiff(d, mountOpts string) error {
if err.(*os.LinkError).Err == syscall.EXDEV {
return nil
}
- return errors.Wrap(err, "failed to rename dir in merged directory")
+ return fmt.Errorf("failed to rename dir in merged directory: %w", err)
}
// get the xattr of "d2"
xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), archive.GetOverlayXattrName("redirect"))
if err != nil {
- return errors.Wrap(err, "failed to read redirect flag on upper layer")
+ return fmt.Errorf("failed to read redirect flag on upper layer: %w", err)
}
if string(xattrRedirect) == "d1" {
@@ -157,11 +157,11 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
- if errors.Cause(err) == unix.EINVAL {
+ if errors.Is(err, unix.EINVAL) {
logrus.Info("metacopy option not supported on this kernel", mountOpts)
return false, nil
}
- return false, errors.Wrapf(err, "failed to mount overlay for metacopy check with %q options", mountOpts)
+ return false, fmt.Errorf("failed to mount overlay for metacopy check with %q options: %w", mountOpts, err)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
@@ -171,7 +171,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
// Make a change that only impacts the inode, and check if the pulled-up copy is marked
// as a metadata-only copy
if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil {
- return false, errors.Wrap(err, "error changing permissions on file for metacopy check")
+ return false, fmt.Errorf("changing permissions on file for metacopy check: %w", err)
}
metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy"))
if err != nil {
@@ -179,7 +179,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
logrus.Info("metacopy option not supported")
return false, nil
}
- return false, errors.Wrap(err, "metacopy flag was not set on file in upper layer")
+ return false, fmt.Errorf("metacopy flag was not set on file in upper layer: %w", err)
}
return metacopy != nil, nil
}
@@ -211,7 +211,7 @@ func doesVolatile(d string) (bool, error) {
// Mount using the mandatory options and configured options
opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work"))
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
- return false, errors.Wrapf(err, "failed to mount overlay for volatile check")
+ return false, fmt.Errorf("failed to mount overlay for volatile check: %w", err)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
@@ -258,7 +258,7 @@ func supportsIdmappedLowerLayers(home string) (bool, error) {
defer cleanupFunc()
if err := createIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil {
- return false, errors.Wrapf(err, "create mapped mount")
+ return false, fmt.Errorf("create mapped mount: %w", err)
}
defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH)
diff --git a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go
index 2af33a6fc..30423e363 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go
@@ -11,7 +11,6 @@ import (
"unsafe"
"github.com/containers/storage/pkg/idtools"
- "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@@ -95,7 +94,7 @@ func createIDMappedMount(source, target string, pid int) error {
path := fmt.Sprintf("/proc/%d/ns/user", pid)
userNsFile, err := os.Open(path)
if err != nil {
- return errors.Wrapf(err, "unable to get user ns file descriptor for %q", path)
+ return fmt.Errorf("unable to get user ns file descriptor for %q: %w", path, err)
}
var attr attr
diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go
index 7c8fd50a3..cf37f8007 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/mount.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package overlay
@@ -42,7 +43,7 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e
cmd := reexec.Command("storage-mountfrom", dir)
w, err := cmd.StdinPipe()
if err != nil {
- return fmt.Errorf("mountfrom error on pipe creation: %v", err)
+ return fmt.Errorf("mountfrom error on pipe creation: %w", err)
}
output := bytes.NewBuffer(nil)
@@ -50,17 +51,17 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e
cmd.Stderr = output
if err := cmd.Start(); err != nil {
w.Close()
- return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
+ return fmt.Errorf("mountfrom error on re-exec cmd: %w", err)
}
//write the options to the pipe for the re-exec'd mount helper to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
- return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
+ return fmt.Errorf("mountfrom json encode to pipe failed: %w", err)
}
w.Close()
if err := cmd.Wait(); err != nil {
- return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output)
+ return fmt.Errorf("mountfrom re-exec output: %s: error: %w", output, err)
}
return nil
}
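mountFrom above re-execs the current binary (reexec.Command dispatches to an in-process "storage-mountfrom" entry point) and streams the mount options to the child as JSON over its stdin pipe, so the mount runs in a process with a different working directory. A self-contained sketch of the same pipe protocol using plain os/exec; the mountOptions struct and the DEMO_CHILD marker are illustrative names, not the real ones:

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

type mountOptions struct {
	Device string `json:"device"`
	Target string `json:"target"`
	Type   string `json:"type"`
}

func main() {
	if os.Getenv("DEMO_CHILD") == "1" {
		// Child: decode the options the parent streamed over stdin.
		var opts mountOptions
		if err := json.NewDecoder(os.Stdin).Decode(&opts); err != nil {
			fmt.Fprintf(os.Stderr, "decode: %v\n", err)
			os.Exit(1)
		}
		fmt.Printf("child would mount %s on %s (%s)\n", opts.Device, opts.Target, opts.Type)
		return
	}

	// Parent: re-exec ourselves and hand the options over the pipe,
	// mirroring reexec.Command + json.NewEncoder(w).Encode(options).
	cmd := exec.Command(os.Args[0])
	cmd.Env = append(os.Environ(), "DEMO_CHILD=1")
	w, err := cmd.StdinPipe()
	if err != nil {
		panic(fmt.Errorf("pipe creation: %w", err))
	}
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		w.Close()
		panic(fmt.Errorf("starting child: %w", err))
	}
	if err := json.NewEncoder(w).Encode(mountOptions{Device: "overlay", Target: "/merged", Type: "overlay"}); err != nil {
		w.Close()
		panic(fmt.Errorf("encode to pipe: %w", err))
	}
	w.Close()
	if err := cmd.Wait(); err != nil {
		panic(fmt.Errorf("child failed: %w", err))
	}
}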
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 8600ee685..6bc8343f4 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -6,6 +6,7 @@ package overlay
import (
"bytes"
"encoding/base64"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -36,7 +37,6 @@ import (
"github.com/opencontainers/runc/libcontainer/userns"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -197,7 +197,7 @@ func checkSupportVolatile(home, runhome string) (bool, error) {
logrus.Debugf("overlay: test mount indicated that volatile is not being used")
}
if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil {
- return false, errors.Wrap(err, "recording volatile-being-used status")
+ return false, fmt.Errorf("recording volatile-being-used status: %w", err)
}
}
}
@@ -223,7 +223,7 @@ func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) {
}
supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home)
if err2 := cachedFeatureRecord(runhome, feature, supportsIDMappedMounts, ""); err2 != nil {
- return false, errors.Wrap(err2, "recording overlay idmapped mounts support status")
+ return false, fmt.Errorf("recording overlay idmapped mounts support status: %w", err2)
}
return supportsIDMappedMounts, err
}
@@ -256,14 +256,14 @@ func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome str
if ok && patherr.Err == syscall.ENOSPC {
return false, err
}
- err = errors.Wrap(err, "kernel does not support overlay fs")
+ err = fmt.Errorf("kernel does not support overlay fs: %w", err)
if err2 := cachedFeatureRecord(runhome, feature, false, err.Error()); err2 != nil {
- return false, errors.Wrapf(err2, "recording overlay not being supported (%v)", err)
+ return false, fmt.Errorf("recording overlay not being supported (%v): %w", err, err2)
}
return false, err
}
if err = cachedFeatureRecord(runhome, feature, supportsDType, ""); err != nil {
- return false, errors.Wrap(err, "recording overlay support status")
+ return false, fmt.Errorf("recording overlay support status: %w", err)
}
}
return supportsDType, nil
@@ -322,7 +322,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
// Create the driver home dir
- if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0755, 0, 0); err != nil {
return nil, err
}
@@ -357,10 +357,10 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
- return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s, a mount_program is required", backingFs)
+ return nil, fmt.Errorf("'overlay' is not supported over %s, a mount_program is required: %w", backingFs, graphdriver.ErrIncompatibleFS)
}
if unshare.IsRootless() && isNetworkFileSystem(fsMagic) {
- return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "A network file system with user namespaces is not supported. Please use a mount_program")
+ return nil, fmt.Errorf("a network file system with user namespaces is not supported. Please use a mount_program: %w", graphdriver.ErrIncompatibleFS)
}
}
@@ -394,7 +394,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
logrus.Debugf("overlay: test mount indicated that metacopy is not being used")
}
if err = cachedFeatureRecord(runhome, feature, usingMetacopy, ""); err != nil {
- return nil, errors.Wrap(err, "recording metacopy-being-used status")
+ return nil, fmt.Errorf("recording metacopy-being-used status: %w", err)
}
} else {
logrus.Infof("overlay: test mount did not indicate whether or not metacopy is being used: %v", err)
@@ -433,11 +433,11 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
if d.quotaCtl, err = quota.NewControl(home); err == nil {
projectQuotaSupported = true
} else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
- return nil, fmt.Errorf("Storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %v", err)
+ return nil, fmt.Errorf("storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %w", err)
}
} else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
// if xfs is not the backing fs then error out if the storage-opt overlay.size is used.
- return nil, fmt.Errorf("Storage option overlay.size and overlay.inodes only supported for backingFS XFS. Found %v", backingFs)
+ return nil, fmt.Errorf("storage option overlay.size and overlay.inodes only supported for backingFS XFS. Found %v", backingFs)
}
logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy)
@@ -488,7 +488,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
st, err := os.Stat(store)
if err != nil {
- return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err)
+ return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %w", store, err)
}
if !st.IsDir() {
return nil, fmt.Errorf("overlay: image path %q must be a directory", store)
@@ -509,7 +509,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
st, err := os.Stat(lstore)
if err != nil {
- return nil, errors.Wrap(err, "overlay: can't stat additionallayerstore dir")
+ return nil, fmt.Errorf("overlay: can't stat additionallayerstore dir: %w", err)
}
if !st.IsDir() {
return nil, fmt.Errorf("overlay: additionallayerstore path %q must be a directory", lstore)
@@ -536,7 +536,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
if val != "" {
_, err := os.Stat(val)
if err != nil {
- return nil, errors.Wrapf(err, "overlay: can't stat program %q", val)
+ return nil, fmt.Errorf("overlay: can't stat program %q: %w", val, err)
}
}
o.mountProgram = val
@@ -707,7 +707,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
}
if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil {
logrus.Debugf("Unable to create kernel-style whiteout: %v", err)
- return supportsDType, errors.Wrapf(err, "unable to create kernel-style whiteout")
+ return supportsDType, fmt.Errorf("unable to create kernel-style whiteout: %w", err)
}
if len(flags) < unix.Getpagesize() {
@@ -726,16 +726,16 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
err := unix.Mount("overlay", mergedDir, "overlay", 0, flags)
if err == nil {
logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower")
- return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
+ return supportsDType, fmt.Errorf("kernel too old to provide multiple lowers feature for overlay: %w", graphdriver.ErrNotSupported)
}
logrus.Debugf("overlay: test mount with a single lower failed %v", err)
}
logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home)
- return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home)
+ return supportsDType, fmt.Errorf("'overlay' is not supported over %s at %q: %w", backingFs, home, graphdriver.ErrIncompatibleFS)
}
logrus.StandardLogger().Logf(logLevel, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
- return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
+ return supportsDType, fmt.Errorf("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.: %w", graphdriver.ErrNotSupported)
}
func (d *Driver) useNaiveDiff() bool {
@@ -917,6 +917,11 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
gidMaps = opts.IDMappings.GIDs()
}
+ // Make the link directory if it does not exist
+ if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil {
+ return err
+ }
+
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return err
@@ -927,12 +932,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
GID: rootGID,
}
- // Make the link directory if it does not exist
- if err := idtools.MkdirAllAndChownNew(path.Join(d.home, linkDir), 0700, idPair); err != nil {
- return err
- }
-
- if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0700, idPair); err != nil {
+ if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0755, idPair); err != nil {
return err
}
if parent != "" {
@@ -949,7 +949,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
// Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir,
// so that we can’t end up with two symlinks in linkDir pointing to the same layer.
if err := d.Remove(id); err != nil {
- return errors.Wrapf(err, "removing a pre-existing layer directory %q", dir)
+ return fmt.Errorf("removing a pre-existing layer directory %q: %w", dir, err)
}
}
@@ -1057,7 +1057,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
}
driver.options.quota.Inodes = uint64(inodes)
default:
- return fmt.Errorf("Unknown option %s", key)
+ return fmt.Errorf("unknown option %s", key)
}
}
@@ -1080,7 +1080,7 @@ func (d *Driver) getLower(parent string) (string, error) {
}
logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(parentDir, "link"))
if err := d.recreateSymlinks(); err != nil {
- return "", errors.Wrap(err, "recreating the links")
+ return "", fmt.Errorf("recreating the links: %w", err)
}
parentLink, err = ioutil.ReadFile(path.Join(parentDir, "link"))
if err != nil {
@@ -1129,7 +1129,7 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {
if os.IsNotExist(err) {
logrus.Warnf("Can't read link %q because it does not exist. A storage corruption might have occurred, attempting to recreate the missing symlinks. It might be best wipe the storage to avoid further errors due to storage corruption.", lower)
if err := d.recreateSymlinks(); err != nil {
- return nil, fmt.Errorf("recreating the missing symlinks: %v", err)
+ return nil, fmt.Errorf("recreating the missing symlinks: %w", err)
}
// let's call Readlink on lower again now that we have recreated the missing symlinks
lp, err = os.Readlink(lower)
@@ -1212,15 +1212,10 @@ func (d *Driver) recreateSymlinks() error {
// List all the directories under the home directory
dirs, err := ioutil.ReadDir(d.home)
if err != nil {
- return fmt.Errorf("reading driver home directory %q: %v", d.home, err)
+ return fmt.Errorf("reading driver home directory %q: %w", d.home, err)
}
- linksDir := filepath.Join(d.home, "l")
// This makes the link directory if it doesn't exist
- rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
- if err != nil {
- return err
- }
- if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil {
return err
}
// Keep looping as long as we take some corrective action in each iteration
@@ -1240,7 +1235,7 @@ func (d *Driver) recreateSymlinks() error {
// Read the "link" file under each layer to get the name of the symlink
data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link"))
if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir.Name()))
+ errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err))
continue
}
linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n"))
@@ -1258,9 +1253,12 @@ func (d *Driver) recreateSymlinks() error {
continue
}
}
+
+ // linkDirFullPath is the full path to the linkDir
+ linkDirFullPath := filepath.Join(d.home, linkDir)
// Now check if we somehow lost a "link" file, by making sure
// that each symlink we have corresponds to one.
- links, err := ioutil.ReadDir(linksDir)
+ links, err := ioutil.ReadDir(linkDirFullPath)
if err != nil {
errs = multierror.Append(errs, err)
continue
@@ -1268,18 +1266,18 @@ func (d *Driver) recreateSymlinks() error {
// Go through all of the symlinks in the "l" directory
for _, link := range links {
// Read the symlink's target, which should be "../$layer/diff"
- target, err := os.Readlink(filepath.Join(linksDir, link.Name()))
+ target, err := os.Readlink(filepath.Join(linkDirFullPath, link.Name()))
if err != nil {
errs = multierror.Append(errs, err)
continue
}
targetComponents := strings.Split(target, string(os.PathSeparator))
if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" {
- errs = multierror.Append(errs, errors.Errorf("link target of %q looks weird: %q", link, target))
+ errs = multierror.Append(errs, fmt.Errorf("link target of %q looks weird: %q", link, target))
// force the link to be recreated on the next pass
- if err := os.Remove(filepath.Join(linksDir, link.Name())); err != nil {
+ if err := os.Remove(filepath.Join(linkDirFullPath, link.Name())); err != nil {
if !os.IsNotExist(err) {
- errs = multierror.Append(errs, errors.Wrapf(err, "removing link %q", link))
+ errs = multierror.Append(errs, fmt.Errorf("removing link %q: %w", link, err))
} // else don’t report any error, but also don’t set madeProgress.
continue
}
@@ -1295,7 +1293,7 @@ func (d *Driver) recreateSymlinks() error {
// NOTE: If two or more links point to the same target, we will update linkFile
// with every value of link.Name(), and set madeProgress = true every time.
if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "correcting link for layer %s", targetID))
+ errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
continue
}
madeProgress = true
@@ -1303,7 +1301,7 @@ func (d *Driver) recreateSymlinks() error {
}
iterations++
if iterations >= maxIterations {
- errs = multierror.Append(errs, fmt.Errorf("Reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations))
+ errs = multierror.Append(errs, fmt.Errorf("reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations))
break
}
}
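recreateSymlinks keeps iterating past individual failures and aggregates them with hashicorp/go-multierror, surfacing the whole collection only once the loop gives up. A minimal sketch of that accumulate-and-continue shape (checkAll and the paths are illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/go-multierror"
)

func checkAll(paths []string) error {
	var errs *multierror.Error
	for _, p := range paths {
		if _, err := os.Stat(p); err != nil {
			// Record the failure and keep going, as the symlink loop does.
			errs = multierror.Append(errs, fmt.Errorf("checking %q: %w", p, err))
			continue
		}
	}
	// ErrorOrNil returns nil when nothing was appended.
	return errs.ErrorOrNil()
}

func main() {
	if err := checkAll([]string{"/etc/hostname", "/no/such/file"}); err != nil {
		fmt.Println(err)
	}
}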
@@ -1383,7 +1381,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(dir, "link"))
if err := d.recreateSymlinks(); err != nil {
- return "", errors.Wrap(err, "recreating the links")
+ return "", fmt.Errorf("recreating the links: %w", err)
}
link, err = ioutil.ReadFile(path.Join(dir, "link"))
if err != nil {
@@ -1438,11 +1436,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if lower == "" && os.IsNotExist(err) {
logrus.Warnf("Can't stat lower layer %q because it does not exist. Going through storage to recreate the missing symlinks.", newpath)
if err := d.recreateSymlinks(); err != nil {
- return "", fmt.Errorf("Recreating the missing symlinks: %v", err)
+ return "", fmt.Errorf("recreating the missing symlinks: %w", err)
}
lower = newpath
} else if lower == "" {
- return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err)
+ return "", fmt.Errorf("can't stat lower layer %q: %w", newpath, err)
}
} else {
if !permsKnown {
@@ -1537,7 +1535,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c))
c++
if err := createIDMappedMount(mappedMountSrc, root, int(pid)); err != nil {
- return "", errors.Wrapf(err, "create mapped mount for %q on %q", mappedMountSrc, root)
+ return "", fmt.Errorf("create mapped mount for %q on %q: %w", mappedMountSrc, root, err)
}
idMappedMounts[mappedMountSrc] = root
@@ -1594,7 +1592,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if output == "" {
output = "<stderr empty>"
}
- return errors.Wrapf(err, "using mount program %s: %s", d.options.mountProgram, output)
+ return fmt.Errorf("using mount program %s: %s: %w", d.options.mountProgram, output, err)
}
return nil
}
@@ -1635,7 +1633,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
flags, data := mount.ParseOptions(mountData)
logrus.Debugf("overlay: mount_data=%s", mountData)
if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil {
- return "", fmt.Errorf("creating overlay mount to %s, mount_data=%q: %v", mountTarget, mountData, err)
+ return "", fmt.Errorf("creating overlay mount to %s, mount_data=%q: %w", mountTarget, mountData, err)
}
return mergedDir, nil
@@ -1674,7 +1672,7 @@ func (d *Driver) Put(id string) error {
// If they fail, fallback to unix.Unmount
for _, v := range []string{"fusermount3", "fusermount"} {
err := exec.Command(v, "-u", mountpoint).Run()
- if err != nil && errors.Cause(err) != exec.ErrNotFound {
+ if err != nil && !errors.Is(err, exec.ErrNotFound) {
logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err)
}
if err == nil {
@@ -2117,15 +2115,14 @@ func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string,
filepath.Join(target, "blob"),
} {
if _, err := os.Stat(p); err != nil {
- return "", errors.Wrapf(graphdriver.ErrLayerUnknown,
- "failed to stat additional layer %q: %v", p, err)
+ wrapped := fmt.Errorf("failed to stat additional layer %q: %w", p, err)
+ return "", fmt.Errorf("%v: %w", wrapped, graphdriver.ErrLayerUnknown)
}
}
return target, nil
}
- return "", errors.Wrapf(graphdriver.ErrLayerUnknown,
- "additional layer (%q, %q) not found", dgst, ref)
+ return "", fmt.Errorf("additional layer (%q, %q) not found: %w", dgst, ref, graphdriver.ErrLayerUnknown)
}
func (d *Driver) releaseAdditionalLayerByID(id string) {
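getAdditionalLayerPath above shows the replacement for errors.Wrapf when two errors are in play: the stat failure stays in the message via %v while only graphdriver.ErrLayerUnknown remains matchable via %w. A sketch, with a local ErrLayerUnknown standing in for the graphdriver sentinel:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

var ErrLayerUnknown = errors.New("layer not known")

func statLayer(p string) error {
	if _, err := os.Stat(p); err != nil {
		// Keep the stat detail in the text (%v) but make only the
		// sentinel matchable (%w).
		wrapped := fmt.Errorf("failed to stat additional layer %q: %w", p, err)
		return fmt.Errorf("%v: %w", wrapped, ErrLayerUnknown)
	}
	return nil
}

func main() {
	err := statLayer("/no/such/layer")
	fmt.Println(errors.Is(err, ErrLayerUnknown)) // true
	fmt.Println(errors.Is(err, fs.ErrNotExist))  // false: %v cut that chain
}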
diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go
index 736c48b9c..651990089 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/randomid.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package overlay
@@ -53,7 +54,7 @@ func generateID(l int) string {
// Any other errors represent a system problem. What did someone
// do to /dev/urandom?
- panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
+ panic(fmt.Errorf("reading random number generator, retried for %v: %w", totalBackoff.String(), err))
}
break
diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
index 9fc57b36b..2670ef3df 100644
--- a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
+++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package overlayutils
@@ -6,7 +7,6 @@ import (
"fmt"
graphdriver "github.com/containers/storage/drivers"
- "github.com/pkg/errors"
)
// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
@@ -16,5 +16,5 @@ func ErrDTypeNotSupported(driver, backingFs string) error {
msg += " Reformat the filesystem with ftype=1 to enable d_type support."
}
msg += " Running without d_type is not supported."
- return errors.Wrap(graphdriver.ErrNotSupported, msg)
+ return fmt.Errorf("%s: %w", msg, graphdriver.ErrNotSupported)
}
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
index 0609f970c..0e6a47fc9 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -1,3 +1,4 @@
+//go:build linux && !exclude_disk_quota && cgo
// +build linux,!exclude_disk_quota,cgo
//
@@ -91,7 +92,7 @@ func generateUniqueProjectID(path string) (uint32, error) {
}
stat, ok := fileinfo.Sys().(*syscall.Stat_t)
if !ok {
- return 0, fmt.Errorf("Not a syscall.Stat_t %s", path)
+ return 0, fmt.Errorf("not a syscall.Stat_t %s", path)
}
projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
@@ -234,8 +235,8 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er
uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
- return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v",
- projectID, backingFsBlockDev, errno.Error())
+ return fmt.Errorf("failed to set quota limit for projid %d on %s: %w",
+ projectID, backingFsBlockDev, errno)
}
return nil
@@ -282,8 +283,8 @@ func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, err
uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
- return d, fmt.Errorf("Failed to get quota limit for projid %d on %s: %v",
- projectID, q.backingFsBlockDev, errno.Error())
+ return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w",
+ projectID, q.backingFsBlockDev, errno)
}
return d, nil
@@ -301,7 +302,7 @@ func getProjectID(targetPath string) (uint32, error) {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
- return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
+ return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
return uint32(fsx.fsx_projid), nil
@@ -319,14 +320,14 @@ func setProjectID(targetPath string, projectID uint32) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
- return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
+ return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
fsx.fsx_projid = C.__u32(projectID)
fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
- return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error())
+ return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno)
}
return nil
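The quota hunks stop flattening errno to text with errno.Error() and wrap the syscall.Errno value itself via %w, so callers can branch on specific errnos. A sketch (the projid and device are placeholders):

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

func setQuota() error {
	// Stand-in for a failed quotactl; syscall.Errno values are errors
	// themselves, so they can be wrapped directly.
	errno := unix.ENOTSUP
	return fmt.Errorf("failed to set quota limit for projid %d on %s: %w", 1234, "/dev/sda1", errno)
}

func main() {
	err := setQuota()
	fmt.Println(errors.Is(err, unix.ENOTSUP)) // true
}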
@@ -369,7 +370,7 @@ func openDir(path string) (*C.DIR, error) {
dir := C.opendir(Cpath)
if dir == nil {
- return nil, fmt.Errorf("Can't open dir")
+ return nil, fmt.Errorf("can't open dir %v", Cpath)
}
return dir, nil
}
@@ -394,10 +395,13 @@ func makeBackingFsDev(home string) (string, error) {
}
backingFsBlockDev := path.Join(home, "backingFsBlockDev")
+ backingFsBlockDevTmp := backingFsBlockDev + ".tmp"
// Re-create just in case someone copied the home directory over to a new device
- unix.Unlink(backingFsBlockDev)
- if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil {
- return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err)
+ if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0600, int(stat.Dev)); err != nil {
+ return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
+ }
+ if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil {
+ return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err)
}
return backingFsBlockDev, nil
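makeBackingFsDev now creates the device node under a .tmp name and rename(2)s it into place rather than unlink-then-mknod, so there is no window in which the path is missing or half-created; rename is atomic within a filesystem. The same pattern with a regular file (mknod needs privileges; replaceAtomically is illustrative):

package main

import (
	"fmt"
	"os"
)

func replaceAtomically(path string, content []byte) error {
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, content, 0o600); err != nil {
		return fmt.Errorf("failed to write %s: %w", tmp, err)
	}
	// Atomically replace whatever was at path before.
	if err := os.Rename(tmp, path); err != nil {
		os.Remove(tmp)
		return fmt.Errorf("failed to rename %s to %s: %w", tmp, path, err)
	}
	return nil
}

func main() {
	if err := replaceAtomically("/tmp/backing-demo", []byte("v2\n")); err != nil {
		fmt.Println(err)
	}
}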
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
index 7469138db..a15e91de2 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
@@ -1,9 +1,10 @@
+//go:build !linux || exclude_disk_quota || !cgo
// +build !linux exclude_disk_quota !cgo
package quota
import (
- "github.com/pkg/errors"
+ "errors"
)
// Quota limit params - currently we only control blocks hard limit
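The //go:build lines added throughout are the Go 1.17 constraint syntax; during the transition both spellings are kept and must agree. A stub-file sketch of the correspondence (|| and ! are explicit in the new form; in the old form a space means OR and a comma means AND, and gofmt regenerates the old line from the new one):

//go:build !linux || exclude_disk_quota || !cgo
// +build !linux exclude_disk_quota !cgo

// Stub compiled only when the real quota implementation is not.
package quota

import "errors"

// ErrQuotaNotSupported is illustrative; the real stub defines its own errors.
var ErrQuotaNotSupported = errors.New("filesystem quota not supported on this platform")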
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index 149151741..7baf6c075 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -1,4 +1,5 @@
-//+build windows
+//go:build windows
+// +build windows
package windows
@@ -103,7 +104,7 @@ func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, e
}
if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil {
- return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err)
+ return nil, fmt.Errorf("windowsfilter failed to create '%s': %w", home, err)
}
d := &Driver{
@@ -252,7 +253,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
storageOptions, err := parseStorageOpt(storageOpt)
if err != nil {
- return fmt.Errorf("Failed to parse storage options - %s", err)
+ return fmt.Errorf("failed to parse storage options - %s", err)
}
if storageOptions.size != 0 {
@@ -266,7 +267,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
}
- return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err)
+ return fmt.Errorf("cannot create layer with missing parent %s: %s", parent, err)
}
if err := d.setLayerChain(id, layerChain); err != nil {
@@ -810,7 +811,7 @@ func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []
}
if err = cmd.Wait(); err != nil {
- return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
+ return 0, fmt.Errorf("re-exec output: %s: error: %w", output, err)
}
return strconv.ParseInt(output.String(), 10, 64)
@@ -890,13 +891,13 @@ func (d *Driver) getLayerChain(id string) ([]string, error) {
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
- return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
+ return nil, fmt.Errorf("unable to read layerchain file - %s", err)
}
var layerChain []string
err = json.Unmarshal(content, &layerChain)
if err != nil {
- return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err)
+ return nil, fmt.Errorf("failed to unmarshall layerchain json - %s", err)
}
return layerChain, nil
@@ -906,13 +907,13 @@ func (d *Driver) getLayerChain(id string) ([]string, error) {
func (d *Driver) setLayerChain(id string, chain []string) error {
content, err := json.Marshal(&chain)
if err != nil {
- return fmt.Errorf("Failed to marshall layerchain json - %s", err)
+ return fmt.Errorf("failed to marshall layerchain json - %s", err)
}
jPath := filepath.Join(d.dir(id), "layerchain.json")
err = ioutil.WriteFile(jPath, content, 0600)
if err != nil {
- return fmt.Errorf("Unable to write layerchain file - %s", err)
+ return fmt.Errorf("unable to write layerchain file - %s", err)
}
return nil
@@ -999,7 +1000,7 @@ func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
}
options.size = uint64(size)
default:
- return nil, fmt.Errorf("Unknown storage option: %s", key)
+ return nil, fmt.Errorf("unknown storage option: %s", key)
}
}
return &options, nil
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index f29dc8f85..eedaeed9d 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -1,3 +1,4 @@
+//go:build linux || freebsd
// +build linux freebsd
package zfs
@@ -19,7 +20,6 @@ import (
"github.com/containers/storage/pkg/parsers"
"github.com/mistifyio/go-zfs"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -54,13 +54,13 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
if _, err := exec.LookPath("zfs"); err != nil {
logger.Debugf("zfs command is not available: %v", err)
- return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available")
+ return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites)
}
file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
if err != nil {
logger.Debugf("cannot open /dev/zfs: %v", err)
- return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err)
+ return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites)
}
defer file.Close()
@@ -90,7 +90,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
filesystems, err := zfs.Filesystems(options.fsName)
if err != nil {
- return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err)
+ return nil, fmt.Errorf("cannot find root filesystem %s: %w", options.fsName, err)
}
filesystemsCache := make(map[string]bool, len(filesystems))
@@ -103,15 +103,15 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
}
if rootDataset == nil {
- return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
+ return nil, fmt.Errorf("zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
}
rootUID, rootGID, err := idtools.GetRootUIDGID(opt.UIDMaps, opt.GIDMaps)
if err != nil {
- return nil, fmt.Errorf("Failed to get root uid/gid: %v", err)
+ return nil, fmt.Errorf("failed to get root uid/gid: %w", err)
}
if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil {
- return nil, fmt.Errorf("Failed to create '%s': %v", base, err)
+ return nil, fmt.Errorf("failed to create '%s': %w", base, err)
}
d := &Driver{
@@ -140,7 +140,7 @@ func parseOptions(opt []string) (zfsOptions, error) {
case "zfs.mountopt":
options.mountOptions = val
default:
- return options, fmt.Errorf("Unknown option %s", key)
+ return options, fmt.Errorf("unknown option %s", key)
}
}
return options, nil
@@ -149,7 +149,7 @@ func parseOptions(opt []string) (zfsOptions, error) {
func lookupZfsDataset(rootdir string) (string, error) {
var stat unix.Stat_t
if err := unix.Stat(rootdir, &stat); err != nil {
- return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ return "", fmt.Errorf("failed to access '%s': %w", rootdir, err)
}
wantedDev := stat.Dev
@@ -168,7 +168,7 @@ func lookupZfsDataset(rootdir string) (string, error) {
}
}
- return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
+ return "", fmt.Errorf("failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
}
// Driver holds information about the driver, such as zfs dataset, options and cache.
@@ -315,7 +315,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error {
if opts != nil {
rootUID, rootGID, err = idtools.GetRootUIDGID(opts.UIDs(), opts.GIDs())
if err != nil {
- return fmt.Errorf("Failed to get root uid/gid: %v", err)
+ return fmt.Errorf("failed to get root uid/gid: %w", err)
}
mountLabel = opts.MountLabel
}
@@ -341,22 +341,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error {
mountOpts := label.FormatMountLabel(d.options.mountOptions, mountLabel)
if err := mount.Mount(name, mountpoint, "zfs", mountOpts); err != nil {
- return errors.Wrap(err, "error creating zfs mount")
+ return fmt.Errorf("creating zfs mount: %w", err)
}
defer func() {
if err := detachUnmount(mountpoint); err != nil {
- logrus.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
+ logrus.Warnf("failed to unmount %s mount %s: %v", id, mountpoint, err)
}
}()
if err := os.Chmod(mountpoint, defaultPerms); err != nil {
- return errors.Wrap(err, "error setting permissions on zfs mount")
+ return fmt.Errorf("setting permissions on zfs mount: %w", err)
}
// this is our first mount after creation of the filesystem, and the root dir may still have root
// permissions instead of the remapped root uid:gid (if user namespaces are enabled):
if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
- return errors.Wrapf(err, "modifying zfs mountpoint (%s) ownership", mountpoint)
+ return fmt.Errorf("modifying zfs mountpoint (%s) ownership: %w", mountpoint, err)
}
}
@@ -377,7 +377,7 @@ func parseStorageOpt(storageOpt map[string]string) (string, error) {
case "size":
return v, nil
default:
- return "0", fmt.Errorf("Unknown option %s", key)
+ return "0", fmt.Errorf("unknown option %s", key)
}
}
return "0", nil
@@ -459,13 +459,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr
}
if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil {
- return "", errors.Wrap(err, "error creating zfs mount")
+ return "", fmt.Errorf("creating zfs mount: %w", err)
}
if remountReadOnly {
opts = label.FormatMountLabel("remount,ro", options.MountLabel)
if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil {
- return "", errors.Wrap(err, "error remounting zfs mount read-only")
+ return "", fmt.Errorf("remounting zfs mount read-only: %w", err)
}
}
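The zfs driver's Get mounts read-write and then issues a second mount with "remount,ro" instead of unmounting and starting over. A sketch of the underlying remount using raw flags (requires CAP_SYS_ADMIN; the mountpoint is a placeholder):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func remountReadOnly(mountpoint string) error {
	// Source and fstype are ignored for MS_REMOUNT; only the flags change.
	if err := unix.Mount("", mountpoint, "", unix.MS_REMOUNT|unix.MS_RDONLY, ""); err != nil {
		return fmt.Errorf("remounting %s read-only: %w", mountpoint, err)
	}
	return nil
}

func main() {
	if err := remountReadOnly("/mnt/demo"); err != nil {
		fmt.Println(err)
	}
}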
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
index 61a2ed871..c3c73c61e 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
@@ -3,8 +3,7 @@ package zfs
import (
"fmt"
- "github.com/containers/storage/drivers"
- "github.com/pkg/errors"
+ graphdriver "github.com/containers/storage/drivers"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -12,13 +11,13 @@ import (
func checkRootdirFs(rootdir string) error {
var buf unix.Statfs_t
if err := unix.Statfs(rootdir, &buf); err != nil {
- return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ return fmt.Errorf("failed to access '%s': %s", rootdir, err)
}
// on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ]
if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) {
logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir)
- return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
+ return fmt.Errorf("no zfs dataset found for rootdir '%s': %w", rootdir, graphdriver.ErrPrerequisites)
}
return nil
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
index 44c68f394..d43ba5c2b 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
@@ -1,8 +1,9 @@
package zfs
import (
+ "fmt"
+
graphdriver "github.com/containers/storage/drivers"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -19,7 +20,7 @@ func checkRootdirFs(rootDir string) error {
if fsMagic != graphdriver.FsMagicZfs {
logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root")
- return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootDir)
+ return fmt.Errorf("no zfs dataset found for rootdir '%s': %w", rootDir, graphdriver.ErrPrerequisites)
}
return nil
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 7d8151b57..02df413c0 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -6,13 +6,13 @@ require (
github.com/BurntSushi/toml v1.1.0
github.com/Microsoft/go-winio v0.5.2
github.com/Microsoft/hcsshim v0.9.3
- github.com/containerd/stargz-snapshotter/estargz v0.11.4
+ github.com/containerd/stargz-snapshotter/estargz v0.12.0
github.com/cyphar/filepath-securejoin v0.2.3
github.com/docker/go-units v0.4.0
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.15.6
+ github.com/klauspost/compress v1.15.8
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
@@ -21,9 +21,8 @@ require (
github.com/opencontainers/runc v1.1.1-0.20220607072441-a7a45d7d2721
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/selinux v1.10.1
- github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.8.1
- github.com/stretchr/testify v1.7.2
+ github.com/stretchr/testify v1.8.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/ulikunitz/xz v0.5.10
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 6fbca4e4f..083f899f7 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -176,8 +176,8 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.11.4 h1:LjrYUZpyOhiSaU7hHrdR82/RBoxfGWSaC0VeSSMXqnk=
-github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
+github.com/containerd/stargz-snapshotter/estargz v0.12.0 h1:idtwRTLjk2erqiYhPWy2L844By8NRFYEwYHcXhoIWPM=
+github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -425,9 +425,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY=
-github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.8 h1:JahtItbkWjf2jzm/T+qgMxkP9EMHsqEUA6vCMGmXvhA=
+github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -619,14 +619,16 @@ github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
-github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go
index 0a06a4323..9567fe90c 100644
--- a/vendor/github.com/containers/storage/idset.go
+++ b/vendor/github.com/containers/storage/idset.go
@@ -1,12 +1,12 @@
package storage
import (
+ "errors"
"fmt"
"strings"
"github.com/containers/storage/pkg/idtools"
"github.com/google/go-intervals/intervalset"
- "github.com/pkg/errors"
)
// idSet represents a set of integer IDs. It is stored as an ordered set of intervals.
@@ -257,9 +257,9 @@ func hasOverlappingRanges(mappings []idtools.IDMap) error {
if conflicts != nil {
if len(conflicts) == 1 {
- return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mapping %s conflicts with other mappings", conflicts[0])
+ return fmt.Errorf("the specified UID and/or GID mapping %s conflicts with other mappings: %w", conflicts[0], ErrInvalidMappings)
}
- return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mappings %s conflict with other mappings", strings.Join(conflicts, ", "))
+ return fmt.Errorf("the specified UID and/or GID mappings %s conflict with other mappings: %w", strings.Join(conflicts, ", "), ErrInvalidMappings)
}
return nil
}
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index a4c3ed22c..e3008ea6c 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -1,6 +1,8 @@
package storage
import (
+ "errors"
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -13,7 +15,6 @@ import (
"github.com/containers/storage/pkg/stringutils"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
const (
@@ -232,7 +233,7 @@ func (i *Image) recomputeDigests() error {
digests := make(map[digest.Digest]struct{})
if i.Digest != "" {
if err := i.Digest.Validate(); err != nil {
- return errors.Wrapf(err, "error validating image digest %q", string(i.Digest))
+ return fmt.Errorf("validating image digest %q: %w", string(i.Digest), err)
}
digests[i.Digest] = struct{}{}
validDigests = append(validDigests, i.Digest)
@@ -242,7 +243,7 @@ func (i *Image) recomputeDigests() error {
continue
}
if digest.Validate() != nil {
- return errors.Wrapf(digest.Validate(), "error validating digest %q for big data item %q", string(digest), name)
+ return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, digest.Validate())
}
// Deduplicate the digest values.
if _, known := digests[digest]; !known {
@@ -283,7 +284,7 @@ func (r *imageStore) Load() error {
// Compute the digest list.
err = image.recomputeDigests()
if err != nil {
- return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names)
+ return fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
}
for _, name := range image.Names {
names[name] = image
@@ -311,7 +312,7 @@ func (r *imageStore) Load() error {
func (r *imageStore) Save() error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
+ return fmt.Errorf("not allowed to modify the image store at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
if !r.Locked() {
return errors.New("image store is not locked for writing")
@@ -387,11 +388,11 @@ func (r *imageStore) lookup(id string) (*Image, bool) {
func (r *imageStore) ClearFlag(id string, flag string) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath())
+ return fmt.Errorf("not allowed to clear flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
delete(image.Flags, flag)
return r.Save()
@@ -399,11 +400,11 @@ func (r *imageStore) ClearFlag(id string, flag string) error {
func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath())
+ return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
if image.Flags == nil {
image.Flags = make(map[string]interface{})
@@ -414,7 +415,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) {
if !r.IsReadWrite() {
- return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath())
+ return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
if id == "" {
id = stringid.GenerateRandomID()
@@ -425,12 +426,12 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
}
}
if _, idInUse := r.byid[id]; idInUse {
- return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id)
+ return nil, fmt.Errorf("an image with ID %q already exists: %w", id, ErrDuplicateID)
}
names = dedupeNames(names)
for _, name := range names {
if image, nameInUse := r.byname[name]; nameInUse {
- return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID)
+ return nil, fmt.Errorf("image name %q is already associated with image %q: %w", name, image.ID, ErrDuplicateName)
}
}
if created.IsZero() {
@@ -452,7 +453,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
}
err = image.recomputeDigests()
if err != nil {
- return nil, errors.Wrapf(err, "error validating digests for new image")
+ return nil, fmt.Errorf("validating digests for new image: %w", err)
}
r.images = append(r.images, image)
r.idindex.Add(id)
@@ -474,7 +475,7 @@ func (r *imageStore) addMappedTopLayer(id, layer string) error {
image.MappedTopLayers = append(image.MappedTopLayers, layer)
return r.Save()
}
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (r *imageStore) removeMappedTopLayer(id, layer string) error {
@@ -487,25 +488,25 @@ func (r *imageStore) removeMappedTopLayer(id, layer string) error {
}
return r.Save()
}
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (r *imageStore) Metadata(id string) (string, error) {
if image, ok := r.lookup(id); ok {
return image.Metadata, nil
}
- return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (r *imageStore) SetMetadata(id, metadata string) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath())
+ return fmt.Errorf("not allowed to modify image metadata at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
if image, ok := r.lookup(id); ok {
image.Metadata = metadata
return r.Save()
}
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (r *imageStore) removeName(image *Image, name string) {
@@ -531,11 +532,11 @@ func (r *imageStore) RemoveNames(id string, names []string) error {
func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath())
+ return fmt.Errorf("not allowed to change image name assignments at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
oldNames := image.Names
names, err := applyNameOperation(oldNames, names, op)
@@ -558,11 +559,11 @@ func (r *imageStore) updateNames(id string, names []string, op updateNameOperati
func (r *imageStore) Delete(id string) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
+ return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
id = image.ID
toDeleteIndex := -1
@@ -605,14 +606,14 @@ func (r *imageStore) Get(id string) (*Image, error) {
if image, ok := r.lookup(id); ok {
return copyImage(image), nil
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (r *imageStore) Lookup(name string) (id string, err error) {
if image, ok := r.lookup(name); ok {
return image.ID, nil
}
- return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (r *imageStore) Exists(id string) bool {
@@ -624,27 +625,27 @@ func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
if images, ok := r.bydigest[d]; ok {
return copyImageSlice(images), nil
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with digest %q", d)
+ return nil, fmt.Errorf("locating image with digest %q: %w", d, ErrImageUnknown)
}
func (r *imageStore) BigData(id, key string) ([]byte, error) {
if key == "" {
- return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name")
+ return nil, fmt.Errorf("can't retrieve image big data value for empty name: %w", ErrInvalidBigDataName)
}
image, ok := r.lookup(id)
if !ok {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
return ioutil.ReadFile(r.datapath(image.ID, key))
}
func (r *imageStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
- return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name")
+ return -1, fmt.Errorf("can't retrieve size of image big data with empty name: %w", ErrInvalidBigDataName)
}
image, ok := r.lookup(id)
if !ok {
- return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
if image.BigDataSizes == nil {
image.BigDataSizes = make(map[string]int64)
@@ -660,11 +661,11 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) {
func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
- return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name")
+ return "", fmt.Errorf("can't retrieve digest of image big data value with empty name: %w", ErrInvalidBigDataName)
}
image, ok := r.lookup(id)
if !ok {
- return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
if image.BigDataDigests == nil {
image.BigDataDigests = make(map[string]digest.Digest)
@@ -678,7 +679,7 @@ func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
func (r *imageStore) BigDataNames(id string) ([]string, error) {
image, ok := r.lookup(id)
if !ok {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
return copyStringSlice(image.BigDataNames), nil
}
@@ -696,14 +697,14 @@ func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
if key == "" {
- return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
+ return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
}
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath())
+ return fmt.Errorf("not allowed to save data items associated with images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
err := os.MkdirAll(r.datadir(image.ID), 0700)
if err != nil {
@@ -712,10 +713,10 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
var newDigest digest.Digest
if bigDataNameIsManifest(key) {
if digestManifest == nil {
- return errors.Wrapf(ErrDigestUnknown, "error digesting manifest: no manifest digest callback provided")
+ return fmt.Errorf("digesting manifest: no manifest digest callback provided: %w", ErrDigestUnknown)
}
if newDigest, err = digestManifest(data); err != nil {
- return errors.Wrapf(err, "error digesting manifest")
+ return fmt.Errorf("digesting manifest: %w", err)
}
} else {
newDigest = digest.Canonical.FromBytes(data)
@@ -759,7 +760,7 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
}
}
if err = image.recomputeDigests(); err != nil {
- return errors.Wrapf(err, "error loading recomputing image digest information for %s", image.ID)
+ return fmt.Errorf("loading recomputing image digest information for %s: %w", image.ID, err)
}
for _, newDigest := range image.Digests {
// add the image to the list of images in the digest-based index which
@@ -780,7 +781,7 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
func (r *imageStore) Wipe() error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
+ return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
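A minimal sketch, not from the patch itself, of the pattern the hunks above apply throughout: each errors.Wrapf(sentinel, msg, args...) becomes fmt.Errorf(msg + ": %w", args..., sentinel), and the redundant "error " prefix is dropped because callers add their own context when they wrap again. The sentinel stays matchable through the chain:

package main

import (
	"errors"
	"fmt"
)

var ErrImageUnknown = errors.New("image not known")

func lookup(id string) error {
	// before: errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
	return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}

func main() {
	err := lookup("abc123")
	fmt.Println(errors.Is(err, ErrImageUnknown)) // true: %w preserves the sentinel
}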
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index bba8d7588..d24625a22 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -2,6 +2,7 @@ package storage
import (
"bytes"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -27,7 +28,6 @@ import (
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/asm"
@@ -481,7 +481,7 @@ func (r *layerStore) Save() error {
func (r *layerStore) saveLayers() error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
+ return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
if !r.Locked() {
return errors.New("layer store is not locked for writing")
@@ -500,7 +500,7 @@ func (r *layerStore) saveLayers() error {
func (r *layerStore) saveMounts() error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
+ return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
if !r.mountsLockfile.Locked() {
return errors.New("layer store mount information is not locked for writing")
@@ -611,7 +611,7 @@ func (r *layerStore) Size(name string) (int64, error) {
func (r *layerStore) ClearFlag(id string, flag string) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath())
+ return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
@@ -623,7 +623,7 @@ func (r *layerStore) ClearFlag(id string, flag string) error {
func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath())
+ return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
@@ -694,7 +694,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
if !r.IsReadWrite() {
- return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
+ return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
if err := os.MkdirAll(r.rundir, 0700); err != nil {
return nil, -1, err
@@ -824,19 +824,19 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
if moreOptions.TemplateLayer != "" {
if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
cleanupFailureContext = "creating a layer from template"
- return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id)
+ return nil, -1, fmt.Errorf("creating copy of template layer %q with ID %q: %w", moreOptions.TemplateLayer, id, err)
}
oldMappings = templateIDMappings
} else {
if writeable {
if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil {
cleanupFailureContext = "creating a read-write layer"
- return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
+ return nil, -1, fmt.Errorf("creating read-write layer with ID %q: %w", id, err)
}
} else {
if err := r.driver.Create(id, parent, &opts); err != nil {
cleanupFailureContext = "creating a read-only layer"
- return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
+ return nil, -1, fmt.Errorf("creating layer with ID %q: %w", id, err)
}
}
oldMappings = parentMappings
@@ -897,7 +897,7 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel
func (r *layerStore) Mounted(id string) (int, error) {
if !r.IsReadWrite() {
- return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
+ return 0, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
@@ -927,7 +927,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
// You are not allowed to mount layers from readonly stores if they
// are not mounted read/only.
if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
- return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
+ return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
@@ -978,7 +978,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
func (r *layerStore) Unmount(id string, force bool) (bool, error) {
if !r.IsReadWrite() {
- return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
+ return false, fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
@@ -1017,7 +1017,7 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
if !r.IsReadWrite() {
- return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
+ return nil, nil, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
@@ -1040,7 +1040,7 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
}
rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error reading root ID values for layer %q", layer.ID)
+ return nil, nil, fmt.Errorf("reading root ID values for layer %q: %w", layer.ID, err)
}
m := idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap)
fsuids := make(map[int]struct{})
@@ -1048,7 +1048,7 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
for dir := filepath.Dir(layer.MountPoint); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) {
st, err := system.Stat(dir)
if err != nil {
- return nil, nil, errors.Wrap(err, "read directory ownership")
+ return nil, nil, fmt.Errorf("read directory ownership: %w", err)
}
lst, err := system.Lstat(dir)
if err != nil {
@@ -1105,7 +1105,7 @@ func (r *layerStore) RemoveNames(id string, names []string) error {
func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath())
+ return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
@@ -1139,25 +1139,25 @@ func (r *layerStore) datapath(id, key string) string {
func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) {
if key == "" {
- return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve layer big data value for empty name")
+ return nil, fmt.Errorf("can't retrieve layer big data value for empty name: %w", ErrInvalidBigDataName)
}
layer, ok := r.lookup(id)
if !ok {
- return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+ return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown)
}
return os.Open(r.datapath(layer.ID, key))
}
func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
if key == "" {
- return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for layer big data item")
+ return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
}
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with layers at %q", r.layerspath())
+ return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q to write bigdata", id)
+ return fmt.Errorf("locating layer with ID %q to write bigdata: %w", id, ErrLayerUnknown)
}
err := os.MkdirAll(r.datadir(layer.ID), 0700)
if err != nil {
@@ -1169,16 +1169,16 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
// so that it is either accessing the old data or the new one.
writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600)
if err != nil {
- return errors.Wrapf(err, "error opening bigdata file")
+ return fmt.Errorf("opening bigdata file: %w", err)
}
if _, err := io.Copy(writer, data); err != nil {
writer.Close()
- return errors.Wrapf(err, "error copying bigdata for the layer")
+ return fmt.Errorf("copying bigdata for the layer: %w", err)
}
if err := writer.Close(); err != nil {
- return errors.Wrapf(err, "error closing bigdata file for the layer")
+ return fmt.Errorf("closing bigdata file for the layer: %w", err)
}
addName := true
@@ -1198,7 +1198,7 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
func (r *layerStore) BigDataNames(id string) ([]string, error) {
layer, ok := r.lookup(id)
if !ok {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating layer with ID %q to retrieve bigdata names", id)
+ return nil, fmt.Errorf("locating layer with ID %q to retrieve bigdata names: %w", id, ErrImageUnknown)
}
return copyStringSlice(layer.BigDataNames), nil
}
@@ -1212,7 +1212,7 @@ func (r *layerStore) Metadata(id string) (string, error) {
func (r *layerStore) SetMetadata(id, metadata string) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath())
+ return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
if layer, ok := r.lookup(id); ok {
layer.Metadata = metadata
@@ -1238,7 +1238,7 @@ func layerHasIncompleteFlag(layer *Layer) bool {
func (r *layerStore) deleteInternal(id string) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
+ return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
@@ -1337,7 +1337,7 @@ func (r *layerStore) Delete(id string) error {
// driver level.
mountCount, err := r.Mounted(id)
if err != nil {
- return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
+ return fmt.Errorf("checking if layer %q is still mounted: %w", id, err)
}
for mountCount > 0 {
if _, err := r.Unmount(id, false); err != nil {
@@ -1345,7 +1345,7 @@ func (r *layerStore) Delete(id string) error {
}
mountCount, err = r.Mounted(id)
if err != nil {
- return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
+ return fmt.Errorf("checking if layer %q is still mounted: %w", id, err)
}
}
if err := r.deleteInternal(id); err != nil {
@@ -1375,7 +1375,7 @@ func (r *layerStore) Get(id string) (*Layer, error) {
func (r *layerStore) Wipe() error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
+ return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
@@ -1530,7 +1530,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
diff, err := archive.DecompressStream(blob)
if err != nil {
if err2 := blob.Close(); err2 != nil {
- err = errors.Wrapf(err, "failed to close blob file: %v", err2)
+ err = fmt.Errorf("failed to close blob file: %v: %w", err2, err)
}
aLayer.Release()
return nil, err
@@ -1538,7 +1538,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
rc, err := maybeCompressReadCloser(diff)
if err != nil {
if err2 := closeAll(blob.Close, diff.Close); err2 != nil {
- err = errors.Wrapf(err, "failed to cleanup: %v", err2)
+ err = fmt.Errorf("failed to cleanup: %v: %w", err2, err)
}
aLayer.Release()
return nil, err
@@ -1576,12 +1576,12 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
fgetter, err := r.newFileGetter(to)
if err != nil {
- errs := multierror.Append(nil, errors.Wrapf(err, "creating file-getter"))
+ errs := multierror.Append(nil, fmt.Errorf("creating file-getter: %w", err))
if err := decompressor.Close(); err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor"))
+ errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
}
if err := tsfile.Close(); err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers"))
+ errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
}
return nil, errs.ErrorOrNil()
}
@@ -1590,16 +1590,16 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
var errs *multierror.Error
if err := decompressor.Close(); err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor"))
+ errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
}
if err := tsfile.Close(); err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers"))
+ errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
}
if err := tarstream.Close(); err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "closing reconstructed tarstream"))
+ errs = multierror.Append(errs, fmt.Errorf("closing reconstructed tarstream: %w", err))
}
if err := fgetter.Close(); err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "closing file-getter"))
+ errs = multierror.Append(errs, fmt.Errorf("closing file-getter: %w", err))
}
if errs != nil {
return errs.ErrorOrNil()
@@ -1624,7 +1624,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) {
if !r.IsReadWrite() {
- return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath())
+ return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
layer, ok := r.lookup(to)
@@ -1671,7 +1671,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
compressor = pgzip.NewWriter(&tsdata)
}
if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
- logrus.Infof("Error setting compression concurrency threads to 1: %v; ignoring", err)
+ logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
}
metadata := storage.NewJSONPacker(compressor)
uncompressed, err := archive.DecompressStream(defragmented)
@@ -1920,7 +1920,7 @@ func (r *layerStore) Modified() (bool, error) {
// reload the storage in any case.
info, err := os.Stat(r.layerspath())
if err != nil && !os.IsNotExist(err) {
- return false, errors.Wrap(err, "stat layers file")
+ return false, fmt.Errorf("stat layers file: %w", err)
}
if info != nil {
tmodified = info.ModTime() != r.layerspathModified
@@ -1957,10 +1957,10 @@ func closeAll(closes ...func() error) (rErr error) {
for _, f := range closes {
if err := f(); err != nil {
if rErr == nil {
- rErr = errors.Wrapf(err, "close error")
+ rErr = fmt.Errorf("close error: %w", err)
continue
}
- rErr = errors.Wrapf(rErr, "%v", err)
+ rErr = fmt.Errorf("%v: %w", err, rErr)
}
}
return
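One subtlety in the closeAll rewrite above: fmt.Errorf("%v: %w", err, rErr) flattens the newer error with %v and keeps only the first error on the unwrap chain, which mirrors what errors.Wrapf(rErr, "%v", err) did. A small sketch, with two stand-in close failures:

package main

import (
	"errors"
	"fmt"
)

func main() {
	first := errors.New("close error: first")
	second := errors.New("second")
	combined := fmt.Errorf("%v: %w", second, first)
	fmt.Println(combined)                    // second: close error: first
	fmt.Println(errors.Is(combined, first))  // true: first is still unwrappable
	fmt.Println(errors.Is(combined, second)) // false: second was flattened by %v
}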
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 570000e82..0d0ad7bae 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -5,6 +5,7 @@ import (
"bufio"
"bytes"
"compress/bzip2"
+ "errors"
"fmt"
"io"
"io/fs"
@@ -25,7 +26,6 @@ import (
"github.com/containers/storage/pkg/unshare"
gzip "github.com/klauspost/pgzip"
"github.com/opencontainers/runc/libcontainer/userns"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/ulikunitz/xz"
)
@@ -220,7 +220,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
case Zstd:
return zstdReader(buf)
default:
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
}
}
@@ -241,9 +241,9 @@ func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, er
case Bzip2, Xz:
// archive/bzip2 does not support writing, and there is no xz support at all
// However, this is not a problem as docker only currently generates gzipped tars
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
default:
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
}
}
@@ -363,7 +363,7 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro
hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
name, err = canonicalTarName(name, fi.IsDir())
if err != nil {
- return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
+ return nil, fmt.Errorf("tar: cannot canonicalize path: %w", err)
}
hdr.Name = name
if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
@@ -406,7 +406,7 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
for _, xattr := range []string{"security.capability", "security.ima"} {
capability, err := system.Lgetxattr(path, xattr)
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
- return errors.Wrapf(err, "failed to read %q attribute from %q", xattr, path)
+ return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err)
}
if capability != nil {
hdr.Xattrs[xattr] = string(capability)
@@ -1123,7 +1123,7 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) e
// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
if tarArchive == nil {
- return fmt.Errorf("Empty archive")
+ return fmt.Errorf("empty archive")
}
dest = filepath.Clean(dest)
if options == nil {
@@ -1239,7 +1239,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
}
if srcSt.IsDir() {
- return fmt.Errorf("Can't copy a directory")
+ return fmt.Errorf("can't copy a directory")
}
// Clean up the trailing slash. This must be done in an operating
@@ -1450,7 +1450,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
contentReader, contentWriter, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
+ return fmt.Errorf("creating pipe extract data to %q: %w", dest, err)
}
defer contentReader.Close()
defer contentWriter.Close()
@@ -1469,11 +1469,11 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
hashWorker.Done()
}()
if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
- err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
+ err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
}
hashWorker.Wait()
if err == nil {
- err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
+ err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
}
return err
}
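The CopyFileWithTarAndChown hunks above wrap Untar so the extracted bytes are hashed while they are copied: the tar stream is read through an io.TeeReader whose write side feeds a digester running in its own goroutine. A self-contained sketch of that tee pattern, with sha256 standing in for the digester and a string for the tar stream:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"sync"
)

func main() {
	src := strings.NewReader("tar bytes would flow here")
	pr, pw := io.Pipe()

	h := sha256.New()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		io.Copy(h, pr) // hash everything the consumer reads
	}()

	// The "untar" side consumes the TeeReader; every byte it reads is
	// simultaneously written into the pipe feeding the hasher.
	io.Copy(ioutil.Discard, io.TeeReader(src, pw))
	pw.Close()
	wg.Wait()
	fmt.Printf("%x\n", h.Sum(nil))
}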
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
index a0872444f..8e7a2fd02 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package archive
@@ -34,7 +35,7 @@ func CanonicalTarNameForPath(p string) (string, error) {
// in file names, it is mostly safe to replace however we must
// check just in case
if strings.Contains(p, "/") {
- return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+ return "", fmt.Errorf("windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
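The //go:build lines added across these files are the Go 1.17 constraint syntax; gofmt emits them alongside the legacy // +build form, and the two must express the same constraint while older toolchains still read only the second. The paired header, as used in the unix files below:

//go:build !windows && !darwin
// +build !windows,!darwin

package chrootarchive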
diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go
index ca8832fe4..59a3207fd 100644
--- a/vendor/github.com/containers/storage/pkg/archive/diff.go
+++ b/vendor/github.com/containers/storage/pkg/archive/diff.go
@@ -86,7 +86,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = os.MkdirAll(parentPath, 0600)
+ err = os.MkdirAll(parentPath, 0755)
if err != nil {
return 0, err
}
@@ -184,7 +184,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
- return 0, fmt.Errorf("Invalid aufs hardlink")
+ return 0, fmt.Errorf("invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
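The 0600 to 0755 change above is a real fix, not a style edit: a directory needs the execute (search) bit before anything inside it can be looked up or created, so parent directories made with 0600 broke extraction of their children for unprivileged users. A minimal illustration, assuming an unprivileged process:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, _ := os.MkdirTemp("", "demo")
	defer os.RemoveAll(dir)

	bad := filepath.Join(dir, "bad")
	os.MkdirAll(bad, 0600) // rw- --- ---: no search (x) permission
	_, err := os.Create(filepath.Join(bad, "child"))
	fmt.Println(err) // permission denied for non-root; root bypasses the check
}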
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index 482e03663..2232f5d4a 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -12,7 +12,6 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/opencontainers/runc/libcontainer/userns"
- "github.com/pkg/errors"
)
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
@@ -63,7 +62,7 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOp
// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
if tarArchive == nil {
- return fmt.Errorf("Empty archive")
+ return fmt.Errorf("empty archive")
}
if options == nil {
options = &archive.TarOptions{}
@@ -115,7 +114,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
contentReader, contentWriter, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
+ return fmt.Errorf("creating pipe extract data to %q: %w", dest, err)
}
defer contentReader.Close()
defer contentWriter.Close()
@@ -134,11 +133,11 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
hashWorker.Done()
}()
if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
- err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
+ err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
}
hashWorker.Wait()
if err == nil {
- err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
+ err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
}
return err
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
index e4b45a454..2d64c2800 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
@@ -1,9 +1,11 @@
+//go:build !windows && !darwin
// +build !windows,!darwin
package chrootarchive
import (
"bytes"
+ "errors"
"flag"
"fmt"
"io"
@@ -15,7 +17,6 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/reexec"
- "github.com/pkg/errors"
)
// untar is the entry-point for storage-untar on re-exec. This is not used on
@@ -69,7 +70,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
// child
r, w, err := os.Pipe()
if err != nil {
- return fmt.Errorf("Untar pipe failure: %v", err)
+ return fmt.Errorf("untar pipe failure: %w", err)
}
if root != "" {
@@ -96,13 +97,13 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
if err := cmd.Start(); err != nil {
w.Close()
- return fmt.Errorf("Untar error on re-exec cmd: %v", err)
+ return fmt.Errorf("untar error on re-exec cmd: %w", err)
}
//write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
- return fmt.Errorf("Untar json encode to pipe failed: %v", err)
+ return fmt.Errorf("untar json encode to pipe failed: %w", err)
}
w.Close()
@@ -112,7 +113,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
// pending on write pipe forever
io.Copy(ioutil.Discard, decompressedArchive)
- return fmt.Errorf("Error processing tar file(%v): %s", err, output)
+ return fmt.Errorf("processing tar file(%s): %w", output, err)
}
return nil
}
@@ -184,22 +185,24 @@ func invokePack(srcPath string, options *archive.TarOptions, root string) (io.Re
stdin, err := cmd.StdinPipe()
if err != nil {
- return nil, errors.Wrap(err, "error getting options pipe for tar process")
+ return nil, fmt.Errorf("getting options pipe for tar process: %w", err)
}
if err := cmd.Start(); err != nil {
- return nil, errors.Wrap(err, "tar error on re-exec cmd")
+ return nil, fmt.Errorf("tar error on re-exec cmd: %w", err)
}
go func() {
err := cmd.Wait()
- err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+ if err != nil {
+ err = fmt.Errorf("processing tar file(%s): %w", errBuff, err)
+ }
tarW.CloseWithError(err)
}()
if err := json.NewEncoder(stdin).Encode(options); err != nil {
stdin.Close()
- return nil, errors.Wrap(err, "tar json encode to pipe failed")
+ return nil, fmt.Errorf("tar json encode to pipe failed: %w", err)
}
stdin.Close()
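The nil guard added in invokePack above is the one place in this file where the conversion is not mechanical: errors.Wrapf returns nil when handed a nil error, but fmt.Errorf always builds a non-nil error, even around a nil %w operand. Without the guard, tarW.CloseWithError would report a failure on every successful pack. A two-line demonstration:

package main

import "fmt"

func main() {
	var err error // nil: cmd.Wait succeeded
	wrapped := fmt.Errorf("processing tar file: %w", err)
	fmt.Println(wrapped == nil) // false: the wrapper is non-nil even though err was nil
}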
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
index 58729ec8c..255882174 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
@@ -36,7 +36,7 @@ func chroot(path string) (err error) {
}
if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
- return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
+ return fmt.Errorf("creating mount namespace before pivot: %w", err)
}
// make everything in new ns private
@@ -53,7 +53,7 @@ func chroot(path string) (err error) {
// setup oldRoot for pivot_root
pivotDir, err := ioutil.TempDir(path, ".pivot_root")
if err != nil {
- return fmt.Errorf("Error setting up pivot dir: %v", err)
+ return fmt.Errorf("setting up pivot dir: %w", err)
}
var mounted bool
@@ -72,7 +72,7 @@ func chroot(path string) (err error) {
// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
// because we already cleaned it up on failed pivot_root
if errCleanup != nil && !os.IsNotExist(errCleanup) {
- errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
+ errCleanup = fmt.Errorf("cleaning up after pivot: %w", errCleanup)
if err == nil {
err = errCleanup
}
@@ -82,7 +82,7 @@ func chroot(path string) (err error) {
if err := unix.PivotRoot(path, pivotDir); err != nil {
// If pivot fails, fall back to the normal chroot after cleaning up temp dir
if err := os.Remove(pivotDir); err != nil {
- return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
+ return fmt.Errorf("cleaning up after failed pivot: %w", err)
}
return realChroot(path)
}
@@ -93,17 +93,17 @@ func chroot(path string) (err error) {
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
if err := unix.Chdir("/"); err != nil {
- return fmt.Errorf("Error changing to new root: %v", err)
+ return fmt.Errorf("changing to new root: %w", err)
}
// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
- return fmt.Errorf("Error making old root private after pivot: %v", err)
+ return fmt.Errorf("making old root private after pivot: %w", err)
}
// Now unmount the old root so it's no longer visible from the new root
if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
- return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
+ return fmt.Errorf("while unmounting old root after pivot: %w", err)
}
mounted = false
@@ -112,10 +112,10 @@ func chroot(path string) (err error) {
func realChroot(path string) error {
if err := unix.Chroot(path); err != nil {
- return fmt.Errorf("Error after fallback to chroot: %v", err)
+ return fmt.Errorf("after fallback to chroot: %w", err)
}
if err := unix.Chdir("/"); err != nil {
- return fmt.Errorf("Error changing to new root after chroot: %v", err)
+ return fmt.Errorf("changing to new root after chroot: %w", err)
}
return nil
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
index 6dd5146cc..3ebee9496 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
@@ -1,4 +1,5 @@
-//+build !windows,!darwin
+//go:build !windows && !darwin
+// +build !windows,!darwin
package chrootarchive
@@ -68,7 +69,7 @@ func applyLayer() {
encoder := json.NewEncoder(os.Stdout)
if err := encoder.Encode(applyLayerResponse{size}); err != nil {
- fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
+ fatal(fmt.Errorf("unable to encode layerSize JSON: %w", err))
}
if _, err := flush(os.Stdin); err != nil {
@@ -104,7 +105,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
data, err := json.Marshal(options)
if err != nil {
- return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
+ return 0, fmt.Errorf("ApplyLayer json encode: %w", err)
}
cmd := reexec.Command("storage-applyLayer", dest)
@@ -115,14 +116,14 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
cmd.Stdout, cmd.Stderr = outBuf, errBuf
if err = cmd.Run(); err != nil {
- return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
+ return 0, fmt.Errorf("ApplyLayer stdout: %s stderr: %s %w", outBuf, errBuf, err)
}
// Stdout should be a valid JSON struct representing an applyLayerResponse.
response := applyLayerResponse{}
decoder := json.NewDecoder(outBuf)
if err = decoder.Decode(&response); err != nil {
- return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
+ return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %w", err)
}
return response.LayerSize, nil
diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
index b8b278a13..c88091393 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
@@ -3,6 +3,7 @@ package chunked
import (
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -19,7 +20,6 @@ import (
"github.com/containers/storage/pkg/ioutils"
jsoniter "github.com/json-iterator/go"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -117,7 +117,7 @@ func (c *layersCache) load() error {
continue
}
logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err)
- } else if errors.Cause(err) != os.ErrNotExist {
+ } else if !errors.Is(err, os.ErrNotExist) {
return err
}
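The cache_linux.go hunk above is a behavioural fix as well as a conversion: errors.Cause from github.com/pkg/errors only unwraps that package's own wrapper types, so the old != comparison missed sentinels wrapped with the stdlib's %w, while errors.Is walks any Unwrap chain. A sketch of the difference:

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	err := fmt.Errorf("reading cache file: %w", os.ErrNotExist)
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true: follows the %w chain
	// pkg/errors.Cause(err) would return err itself here, since the wrapper
	// was not built by pkg/errors, and the old comparison would not match.
}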
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go
index 96254bc4e..8d4d3c4a7 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go
@@ -4,6 +4,7 @@ import (
archivetar "archive/tar"
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"io"
"strconv"
@@ -14,7 +15,6 @@ import (
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/vbatts/tar-split/archive/tar"
)
@@ -92,7 +92,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
*/
tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64)
if err != nil {
- return nil, 0, errors.Wrap(err, "parse ToC offset")
+ return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
}
size := int64(blobSize - footerSize - tocOffset)
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 7b6cd8fe4..7278f2d88 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -4,6 +4,7 @@ import (
archivetar "archive/tar"
"context"
"encoding/base64"
+ "errors"
"fmt"
"hash"
"io"
@@ -31,7 +32,6 @@ import (
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/archive/tar"
"golang.org/x/sys/unix"
@@ -406,7 +406,7 @@ func (o *originFile) OpenFile() (io.ReadCloser, error) {
// setFileAttrs sets the file attributes for file given metadata
func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error {
if file == nil || file.Fd() < 0 {
- return errors.Errorf("invalid file")
+ return errors.New("invalid file")
}
fd := int(file.Fd())
@@ -547,7 +547,7 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil
// Add an additional check to make sure the opened fd is inside the rootfs
if !strings.HasPrefix(target, targetRoot) {
unix.Close(fd)
- return -1, fmt.Errorf("error while resolving %q. It resolves outside the root directory", name)
+ return -1, fmt.Errorf("while resolving %q. It resolves outside the root directory", name)
}
return fd, err
@@ -847,10 +847,10 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan
return err
}
if part == nil {
- return errors.Errorf("invalid stream returned")
+ return errors.New("invalid stream returned")
}
default:
- return errors.Errorf("internal error: missing part misses both local and remote data stream")
+ return errors.New("internal error: missing part misses both local and remote data stream")
}
for _, mf := range missingPart.Chunks {
@@ -865,7 +865,7 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan
}
if mf.File.Name == "" {
- Err = errors.Errorf("file name empty")
+ Err = errors.New("file name empty")
goto exit
}
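storage_linux.go also shows the replacement for errors.Errorf, which has no stdlib equivalent: constant messages become errors.New, and only messages that actually format arguments keep fmt.Errorf. In sketch form:

package main

import (
	"errors"
	"fmt"
)

func main() {
	e1 := errors.New("invalid file")                   // was errors.Errorf("invalid file")
	e2 := fmt.Errorf("invalid stream %d returned", 42) // formatting still needs fmt.Errorf
	fmt.Println(e1, e2)
}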
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
index 3a406ba78..4d952aba3 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
@@ -1,13 +1,14 @@
+//go:build !linux
// +build !linux
package chunked
import (
"context"
+ "errors"
storage "github.com/containers/storage"
graphdriver "github.com/containers/storage/drivers"
- "github.com/pkg/errors"
)
// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
index 6a0ac2464..6e6852d4d 100644
--- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
@@ -1,3 +1,4 @@
+//go:build linux && cgo
// +build linux,cgo
package devicemapper
@@ -805,7 +806,7 @@ func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDevice
if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil {
if doSuspend {
if err2 := ResumeDevice(baseName); err2 != nil {
- return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2)
+ return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: %w", err, err2)
}
}
return err
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
index 5be98165e..abf6e2f85 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
@@ -1,6 +1,7 @@
package fileutils
import (
+ "errors"
"fmt"
"io"
"os"
@@ -9,7 +10,6 @@ import (
"strings"
"text/scanner"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -340,13 +340,13 @@ func ReadSymlinkedDirectory(path string) (string, error) {
// The target of the symbolic link can be a file and a directory.
func ReadSymlinkedPath(path string) (realPath string, err error) {
if realPath, err = filepath.Abs(path); err != nil {
- return "", errors.Wrapf(err, "unable to get absolute path for %q", path)
+ return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
- return "", errors.Wrapf(err, "failed to canonicalise path for %q", path)
+ return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err)
}
if _, err := os.Stat(realPath); err != nil {
- return "", errors.Wrapf(err, "failed to stat target %q of %q", realPath, path)
+ return "", fmt.Errorf("failed to stat target %q of %q: %w", realPath, path, err)
}
return realPath, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
index 3ae2a1cd7..a7f4eaf13 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -14,7 +14,6 @@ import (
"syscall"
"github.com/containers/storage/pkg/system"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -120,7 +119,7 @@ func RawToContainer(hostID int, idMap []IDMap) (int, error) {
return contID, nil
}
}
- return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+ return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID)
}
// RawToHost takes an id mapping and a remapped ID, and translates the ID to
@@ -140,7 +139,7 @@ func RawToHost(contID int, idMap []IDMap) (int, error) {
return hostID, nil
}
}
- return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+ return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
}
// IDPair is a UID and GID pair
@@ -168,10 +167,10 @@ func NewIDMappings(username, groupname string) (*IDMappings, error) {
return nil, err
}
if len(subuidRanges) == 0 {
- return nil, fmt.Errorf("No subuid ranges found for user %q in %s", username, subuidFileName)
+ return nil, fmt.Errorf("no subuid ranges found for user %q in %s", username, subuidFileName)
}
if len(subgidRanges) == 0 {
- return nil, fmt.Errorf("No subgid ranges found for group %q in %s", groupname, subgidFileName)
+ return nil, fmt.Errorf("no subgid ranges found for group %q in %s", groupname, subgidFileName)
}
return &IDMappings{
@@ -343,16 +342,16 @@ func parseSubidFile(path, username string) (ranges, error) {
}
parts := strings.Split(text, ":")
if len(parts) != 3 {
- return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+ return rangeList, fmt.Errorf("cannot parse subuid/gid information: Format not correct for %s file", path)
}
if parts[0] == username || username == "ALL" || (parts[0] == uidstr && parts[0] != "") {
startid, err := strconv.Atoi(parts[1])
if err != nil {
- return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err)
}
length, err := strconv.Atoi(parts[2])
if err != nil {
- return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err)
}
rangeList = append(rangeList, subIDRange{startid, length})
}
@@ -362,7 +361,7 @@ func parseSubidFile(path, username string) (ranges, error) {
func checkChownErr(err error, name string, uid, gid int) error {
if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
- return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate", uid, gid, name)
+ return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err)
}
return err
}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
index c96465369..03e787376 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
@@ -4,10 +4,9 @@
package idtools
import (
+ "errors"
"os/user"
"unsafe"
-
- "github.com/pkg/errors"
)
/*
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
index 7f270c61f..daff1e4a9 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package idtools
diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go
index 1c819a1f9..042d0ea95 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/parser.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go
@@ -11,22 +11,22 @@ import (
func parseTriple(spec []string) (container, host, size uint32, err error) {
cid, err := strconv.ParseUint(spec[0], 10, 32)
if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err)
+ return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[0], err)
}
hid, err := strconv.ParseUint(spec[1], 10, 32)
if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err)
+ return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[1], err)
}
sz, err := strconv.ParseUint(spec[2], 10, 32)
if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err)
+ return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[2], err)
}
return uint32(cid), uint32(hid), uint32(sz), nil
}
// ParseIDMap parses idmap triples from string.
func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) {
- stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec)
+ stdErr := fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec)
for _, idMapSpec := range mapSpec {
if idMapSpec == "" {
continue
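For reference, parseTriple and ParseIDMap consume "container:host:size" triples of uint32 values, the same shape written to /proc/PID/uid_map. A hypothetical caller, outside the patch, parsing one triple the way parseTriple does:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	spec := strings.Split("0:100000:65536", ":")
	cid, _ := strconv.ParseUint(spec[0], 10, 32) // container-side start
	hid, _ := strconv.ParseUint(spec[1], 10, 32) // host-side start
	sz, _ := strconv.ParseUint(spec[2], 10, 32)  // length of the range
	fmt.Println(uint32(cid), uint32(hid), uint32(sz)) // 0 100000 65536
}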
diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
index 3dd7bf210..a467f41c3 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
@@ -37,32 +37,32 @@ var (
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
if err := addUser(name); err != nil {
- return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
+ return -1, -1, fmt.Errorf("adding user %q: %w", name, err)
}
// Query the system for the created uid and gid pair
out, err := execCmd("id", name)
if err != nil {
- return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
+ return -1, -1, fmt.Errorf("trying to find uid/gid for new user %q: %w", name, err)
}
matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
if len(matches) != 3 {
- return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
+ return -1, -1, fmt.Errorf("can't find uid, gid from `id` output: %q", string(out))
}
uid, err := strconv.Atoi(matches[1])
if err != nil {
- return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
+ return -1, -1, fmt.Errorf("can't convert found uid (%s) to int: %w", matches[1], err)
}
gid, err := strconv.Atoi(matches[2])
if err != nil {
- return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
+ return -1, -1, fmt.Errorf("can't convert found gid (%s) to int: %w", matches[2], err)
}
// Now we need to create the subuid/subgid ranges for our new user/group (system users
// do not get auto-created ranges in subuid/subgid)
if err := createSubordinateRanges(name); err != nil {
- return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
+ return -1, -1, fmt.Errorf("couldn't create subordinate ID ranges: %w", err)
}
return uid, gid, nil
}
@@ -77,12 +77,12 @@ func addUser(userName string) error {
}
})
if userCommand == "" {
- return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
+ return fmt.Errorf("cannot add user; no useradd/adduser binary found")
}
args := fmt.Sprintf(cmdTemplates[userCommand], userName)
out, err := execCmd(userCommand, args)
if err != nil {
- return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
+ return fmt.Errorf("failed to add user with error: %w; output: %q", err, string(out))
}
return nil
}
@@ -93,33 +93,33 @@ func createSubordinateRanges(name string) error {
// by the distro tooling
ranges, err := readSubuid(name)
if err != nil {
- return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
+ return fmt.Errorf("while looking for subuid ranges for user %q: %w", name, err)
}
if len(ranges) == 0 {
// no UID ranges; let's create one
startID, err := findNextUIDRange()
if err != nil {
- return fmt.Errorf("Can't find available subuid range: %v", err)
+ return fmt.Errorf("can't find available subuid range: %w", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
if err != nil {
- return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
+ return fmt.Errorf("unable to add subuid range to user: %q; output: %s, err: %w", name, out, err)
}
}
ranges, err = readSubgid(name)
if err != nil {
- return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
+ return fmt.Errorf("while looking for subgid ranges for user %q: %w", name, err)
}
if len(ranges) == 0 {
// no GID ranges; let's create one
startID, err := findNextGIDRange()
if err != nil {
- return fmt.Errorf("Can't find available subgid range: %v", err)
+ return fmt.Errorf("can't find available subgid range: %w", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
if err != nil {
- return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
+ return fmt.Errorf("unable to add subgid range to user: %q; output: %s, err: %w", name, out, err)
}
}
return nil
@@ -128,7 +128,7 @@ func createSubordinateRanges(name string) error {
func findNextUIDRange() (int, error) {
ranges, err := readSubuid("ALL")
if err != nil {
- return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
+ return -1, fmt.Errorf("couldn't parse all ranges in /etc/subuid file: %w", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
@@ -137,7 +137,7 @@ func findNextUIDRange() (int, error) {
func findNextGIDRange() (int, error) {
ranges, err := readSubgid("ALL")
if err != nil {
- return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
+ return -1, fmt.Errorf("couldn't parse all ranges in /etc/subgid file: %w", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
index d98b354cb..15bd98ede 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package idtools
@@ -8,5 +9,5 @@ import "fmt"
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
- return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
+ return -1, -1, fmt.Errorf("no support for adding users or groups on this OS")
}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
index 9703ecbd9..33a7dee6c 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package idtools
@@ -23,7 +24,7 @@ func resolveBinary(binname string) (string, error) {
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}
- return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
+ return "", fmt.Errorf("binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}
func execCmd(cmd, args string) ([]byte, error) {
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
index 6a00141c3..d3f4df098 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
@@ -1,11 +1,10 @@
package lockfile
import (
+ "fmt"
"path/filepath"
"sync"
"time"
-
- "github.com/pkg/errors"
)
// A Locker represents a file lock where the file is used to cache an
@@ -87,14 +86,14 @@ func getLockfile(path string, ro bool) (Locker, error) {
}
cleanPath, err := filepath.Abs(path)
if err != nil {
- return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path)
+ return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err)
}
if locker, ok := lockfiles[cleanPath]; ok {
if ro && locker.IsReadWrite() {
- return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath)
+ return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath)
}
if !ro && !locker.IsReadWrite() {
- return nil, errors.Errorf("lock %q is not a read-write lock", cleanPath)
+ return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath)
}
return locker, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
index aa843920f..b04c1ad05 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
@@ -15,7 +15,6 @@ import (
"time"
"github.com/containers/storage/pkg/system"
- "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@@ -86,7 +85,7 @@ func openLock(path string, ro bool) (fd int, err error) {
// the directory of the lockfile seems to be removed, try to create it
if os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
- return fd, errors.Wrap(err, "creating locker directory")
+ return fd, fmt.Errorf("creating locker directory: %w", err)
}
return openLock(path, ro)
@@ -112,7 +111,7 @@ func createLockerForPath(path string, ro bool) (Locker, error) {
// Check if we can open the lock.
fd, err := openLock(path, ro)
if err != nil {
- return nil, errors.Wrapf(err, "error opening %q", path)
+ return nil, err
}
unix.Close(fd)
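Note that createLockerForPath drops the "error opening %q" wrapper entirely rather than converting it: the failure coming out of openLock is ultimately an *fs.PathError from the os package, which already carries the path, so the wrap only duplicated it. A sketch of what the unwrapped error already contains:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.OpenFile("/no/such/lock", os.O_RDWR, 0o644)
	var pe *fs.PathError
	fmt.Println(errors.As(err, &pe), pe.Path) // true /no/such/lock
	fmt.Println(err) // open /no/such/lock: no such file or directory
}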
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go
index 07a0f4847..5de3a671d 100644
--- a/vendor/github.com/containers/storage/pkg/mount/flags.go
+++ b/vendor/github.com/containers/storage/pkg/mount/flags.go
@@ -99,7 +99,7 @@ func MergeTmpfsOptions(options []string) ([]string, error) {
}
opt := strings.SplitN(option, "=", 2)
if len(opt) != 2 || !validFlags[opt[0]] {
- return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+ return nil, fmt.Errorf("invalid tmpfs option %q", opt)
}
if !dataCollisions[opt[0]] {
// We prepend the option and add to collision map
@@ -142,7 +142,7 @@ func ParseTmpfsOptions(options string) (int, string, error) {
for _, o := range strings.Split(data, ",") {
opt := strings.SplitN(o, "=", 2)
if !validFlags[opt[0]] {
- return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+ return 0, "", fmt.Errorf("invalid tmpfs option %q", opt)
}
}
return flags, data, nil
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
index 2404e331d..c70b0bf99 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
@@ -62,7 +62,7 @@ func mount(device, target, mType string, flag uintptr, data string) error {
if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
reason := C.GoString(C.strerror(*C.__error()))
- return fmt.Errorf("Failed to call nmount: %s", reason)
+ return fmt.Errorf("failed to call nmount: %s", reason)
}
return nil
}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
index 71f205b28..20d67f780 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
@@ -1,3 +1,4 @@
+//go:build darwin
// +build darwin
// Package kernel provides helper function to get, parse and compare kernel
@@ -37,16 +38,16 @@ func getRelease() (string, error) {
// It has the format like ' Kernel Version: Darwin 14.5.0'
content := strings.SplitN(line, ":", 2)
if len(content) != 2 {
- return "", fmt.Errorf("Kernel Version is invalid")
+ return "", fmt.Errorf("kernel version is invalid")
}
prettyNames, err := shellwords.Parse(content[1])
if err != nil {
- return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error())
+ return "", fmt.Errorf("kernel version is invalid: %s", err.Error())
}
if len(prettyNames) != 2 {
- return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ")
+ return "", fmt.Errorf("kernel version needs to be 'Darwin x.x.x' ")
}
release = prettyNames[1]
}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
index acc897168..85c23381d 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/parsers.go
+++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
@@ -13,7 +13,7 @@ import (
func ParseKeyValueOpt(opt string) (string, string, error) {
parts := strings.SplitN(opt, "=", 2)
if len(parts) != 2 {
- return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
+ return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
}
return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
index a0183885b..d66f1c5a4 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
@@ -19,7 +19,7 @@ import "C"
func getMemInfo() (int64, int64, error) {
data, err := unix.SysctlRaw("vm.vmtotal")
if err != nil {
- return -1, -1, fmt.Errorf("Can't get kernel info: %v", err)
+ return -1, -1, fmt.Errorf("can't get kernel info: %w", err)
}
if len(data) != C.sizeof_struct_vmtotal {
return -1, -1, fmt.Errorf("unexpected vmtotal size %d", len(data))
@@ -39,12 +39,12 @@ func getSwapInfo() (int64, int64, error) {
)
swapCount, err := unix.SysctlUint32("vm.nswapdev")
if err != nil {
- return -1, -1, fmt.Errorf("error reading vm.nswapdev: %v", err)
+ return -1, -1, fmt.Errorf("reading vm.nswapdev: %w", err)
}
for i := 0; i < int(swapCount); i++ {
data, err := unix.SysctlRaw("vm.swap_info", i)
if err != nil {
- return -1, -1, fmt.Errorf("error reading vm.swap_info.%d: %v", i, err)
+ return -1, -1, fmt.Errorf("reading vm.swap_info.%d: %w", i, err)
}
if len(data) != C.sizeof_struct_xswdev {
return -1, -1, fmt.Errorf("unexpected swap_info size %d", len(data))
@@ -62,15 +62,15 @@ func getSwapInfo() (int64, int64, error) {
func ReadMemInfo() (*MemInfo, error) {
MemTotal, MemFree, err := getMemInfo()
if err != nil {
- return nil, fmt.Errorf("error getting memory totals %v\n", err)
+ return nil, fmt.Errorf("getting memory totals %w", err)
}
SwapTotal, SwapFree, err := getSwapInfo()
if err != nil {
- return nil, fmt.Errorf("error getting swap totals %v\n", err)
+ return nil, fmt.Errorf("getting swap totals %w", err)
}
if MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 {
- return nil, fmt.Errorf("error getting system memory info %v\n", err)
+ return nil, fmt.Errorf("getting system memory info %w", err)
}
meminfo := &MemInfo{}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
index 925776e78..d727b545c 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
@@ -1,3 +1,4 @@
+//go:build solaris && cgo
// +build solaris,cgo
package system
@@ -90,7 +91,7 @@ func ReadMemInfo() (*MemInfo, error) {
if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
SwapFree < 0 {
- return nil, fmt.Errorf("error getting system memory info %v\n", err)
+ return nil, fmt.Errorf("getting system memory info %w", err)
}
meminfo := &MemInfo{}
diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go
index aab891522..9f2509738 100644
--- a/vendor/github.com/containers/storage/pkg/system/path_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package system
@@ -21,13 +22,13 @@ import (
// d:\ --> Fail
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
if len(path) == 2 && string(path[1]) == ":" {
- return "", fmt.Errorf("No relative path specified in %q", path)
+ return "", fmt.Errorf("relative path not specified in %q", path)
}
if !filepath.IsAbs(path) || len(path) < 2 {
return filepath.FromSlash(path), nil
}
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
- return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+ return "", fmt.Errorf("specified path is not on the system drive (C:)")
}
return filepath.FromSlash(path[2:]), nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go
index 510e71428..b65121f1d 100644
--- a/vendor/github.com/containers/storage/pkg/system/rm.go
+++ b/vendor/github.com/containers/storage/pkg/system/rm.go
@@ -1,12 +1,12 @@
package system
import (
+ "fmt"
"os"
"syscall"
"time"
"github.com/containers/storage/pkg/mount"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -67,7 +67,7 @@ func EnsureRemoveAll(dir string) error {
}
if e := mount.Unmount(pe.Path); e != nil {
- return errors.Wrapf(e, "error while removing %s", dir)
+ return fmt.Errorf("while removing %s: %w", dir, e)
}
if exitOnErr[pe.Path] == maxRetry {
diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
index 1bb852d11..c4816c133 100644
--- a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
@@ -1,9 +1,11 @@
+//go:build linux || freebsd || darwin
// +build linux freebsd darwin
package system
import (
- "github.com/pkg/errors"
+ "errors"
+
"golang.org/x/sys/unix"
)
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go
index 221c7e088..c854fdf5e 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go
@@ -6,7 +6,6 @@ import (
"os/user"
"sync"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -27,7 +26,7 @@ func HomeDir() (string, error) {
if home == "" {
usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID()))
if err != nil {
- homeDir, homeDirErr = "", errors.Wrapf(err, "unable to resolve HOME directory")
+ homeDir, homeDirErr = "", fmt.Errorf("unable to resolve HOME directory: %w", err)
return
}
homeDir, homeDirErr = usr.HomeDir, nil
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
index aec416720..f52760abb 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
@@ -5,6 +5,7 @@ package unshare
import (
"bytes"
+ "errors"
"fmt"
"io"
"os"
@@ -14,7 +15,6 @@ import (
"syscall"
"github.com/containers/storage/pkg/reexec"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -49,7 +49,7 @@ func (c *Cmd) Start() error {
// Create the pipe for reading the child's PID.
pidRead, pidWrite, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "error creating pid pipe")
+ return fmt.Errorf("creating pid pipe: %w", err)
}
c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
c.ExtraFiles = append(c.ExtraFiles, pidWrite)
@@ -59,7 +59,7 @@ func (c *Cmd) Start() error {
if err != nil {
pidRead.Close()
pidWrite.Close()
- return errors.Wrapf(err, "error creating pid pipe")
+ return fmt.Errorf("creating pid pipe: %w", err)
}
c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
c.ExtraFiles = append(c.ExtraFiles, continueRead)
@@ -108,13 +108,13 @@ func (c *Cmd) Start() error {
pidString := ""
b := new(bytes.Buffer)
if _, err := io.Copy(b, pidRead); err != nil {
- return errors.Wrapf(err, "Reading child PID")
+ return fmt.Errorf("reading child PID: %w", err)
}
pidString = b.String()
pid, err := strconv.Atoi(pidString)
if err != nil {
fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
- return errors.Wrapf(err, "error parsing PID %q", pidString)
+ return fmt.Errorf("parsing PID %q: %w", pidString, err)
}
// Run any additional setup that we want to do before the child starts running proper.
@@ -157,7 +157,7 @@ func ExecRunnable(cmd Runnable, cleanup func()) {
os.Exit(status)
}
if err := cmd.Run(); err != nil {
- if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
+ if exitError, ok := err.(*exec.ExitError); ok {
if exitError.ProcessState.Exited() {
if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
if waitStatus.Exited() {
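One subtlety in the ExecRunnable hunk above: errors.Cause(err).(*exec.ExitError) unwrapped pkg/errors chains, while the bare type assertion that replaces it unwraps nothing. That is fine when cmd.Run's error is returned unwrapped, as here; where a %w-wrapped error is possible, errors.As is the chain-aware equivalent. A standalone sketch, assuming a POSIX `false` binary on PATH:

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	err := exec.Command("false").Run() // exits with status 1
	wrapped := fmt.Errorf("running child: %w", err)

	// errors.As walks the %w chain; a bare wrapped.(*exec.ExitError)
	// assertion would fail on the wrapped value.
	var exitErr *exec.ExitError
	if errors.As(wrapped, &exitErr) {
		fmt.Println("exit code:", exitErr.ProcessState.ExitCode())
	}
}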
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
index 16d14d2a9..c86390bd3 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
@@ -6,6 +6,7 @@ package unshare
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io"
"os"
@@ -21,7 +22,6 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/syndtr/gocapability/capability"
)
@@ -119,7 +119,7 @@ func (c *Cmd) Start() error {
// Create the pipe for reading the child's PID.
pidRead, pidWrite, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "error creating pid pipe")
+ return fmt.Errorf("creating pid pipe: %w", err)
}
c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
c.ExtraFiles = append(c.ExtraFiles, pidWrite)
@@ -129,7 +129,7 @@ func (c *Cmd) Start() error {
if err != nil {
pidRead.Close()
pidWrite.Close()
- return errors.Wrapf(err, "error creating pid pipe")
+ return fmt.Errorf("creating pid pipe: %w", err)
}
c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
c.ExtraFiles = append(c.ExtraFiles, continueRead)
@@ -178,13 +178,13 @@ func (c *Cmd) Start() error {
pidString := ""
b := new(bytes.Buffer)
if _, err := io.Copy(b, pidRead); err != nil {
- return errors.Wrapf(err, "Reading child PID")
+ return fmt.Errorf("reading child PID: %w", err)
}
pidString = b.String()
pid, err := strconv.Atoi(pidString)
if err != nil {
fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
- return errors.Wrapf(err, "error parsing PID %q", pidString)
+ return fmt.Errorf("parsing PID %q: %w", pidString, err)
}
pidString = fmt.Sprintf("%d", pid)
@@ -194,26 +194,26 @@ func (c *Cmd) Start() error {
setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening setgroups: %v", err)
- return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
+ return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err)
}
defer setgroups.Close()
if c.GidMappingsEnableSetgroups {
if _, err := fmt.Fprintf(setgroups, "allow"); err != nil {
fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err)
- return errors.Wrapf(err, "error opening \"allow\" to /proc/%s/setgroups", pidString)
+ return fmt.Errorf("opening \"allow\" to /proc/%s/setgroups: %w", pidString, err)
}
} else {
if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err)
- return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString)
+ return fmt.Errorf("writing \"deny\" to /proc/%s/setgroups: %w", pidString, err)
}
}
if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 {
uidmap, gidmap, err := GetHostIDMappings("")
if err != nil {
- fmt.Fprintf(continueWrite, "Reading ID mappings in parent: %v", err)
- return errors.Wrapf(err, "Reading ID mappings in parent")
+ fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
+ return fmt.Errorf("reading ID mappings in parent: %w", err)
}
if len(c.UidMappings) == 0 {
c.UidMappings = uidmap
@@ -240,7 +240,7 @@ func (c *Cmd) Start() error {
if c.UseNewgidmap {
path, err := exec.LookPath("newgidmap")
if err != nil {
- return errors.Wrapf(err, "error finding newgidmap")
+ return fmt.Errorf("finding newgidmap: %w", err)
}
cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
g.Reset()
@@ -249,7 +249,7 @@ func (c *Cmd) Start() error {
if err := cmd.Run(); err == nil {
gidmapSet = true
} else {
- logrus.Warnf("Error running newgidmap: %v: %s", err, g.String())
+ logrus.Warnf("running newgidmap: %v: %s", err, g.String())
isSetgid, err := IsSetID(path, os.ModeSetgid, capability.CAP_SETGID)
if err != nil {
logrus.Warnf("Failed to check for setgid on %s: %v", path, err)
@@ -268,23 +268,23 @@ func (c *Cmd) Start() error {
setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err)
- return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
+ return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err)
}
defer setgroups.Close()
if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err)
- return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString)
+ return fmt.Errorf("writing 'deny' to /proc/%s/setgroups: %w", pidString, err)
}
}
gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
- fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
- return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
+ fmt.Fprintf(continueWrite, "opening /proc/%s/gid_map: %v", pidString, err)
+ return fmt.Errorf("opening /proc/%s/gid_map: %w", pidString, err)
}
defer gidmap.Close()
if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
- fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
- return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString)
+ fmt.Fprintf(continueWrite, "writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
+ return fmt.Errorf("writing %q to /proc/%s/gid_map: %w", g.String(), pidString, err)
}
}
}
@@ -300,7 +300,7 @@ func (c *Cmd) Start() error {
if c.UseNewuidmap {
path, err := exec.LookPath("newuidmap")
if err != nil {
- return errors.Wrapf(err, "error finding newuidmap")
+ return fmt.Errorf("finding newuidmap: %w", err)
}
cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
u.Reset()
@@ -328,12 +328,12 @@ func (c *Cmd) Start() error {
uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
- return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString)
+ return fmt.Errorf("opening /proc/%s/uid_map: %w", pidString, err)
}
defer uidmap.Close()
if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil {
fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err)
- return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString)
+ return fmt.Errorf("writing %q to /proc/%s/uid_map: %w", u.String(), pidString, err)
}
}
}
@@ -343,12 +343,12 @@ func (c *Cmd) Start() error {
oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err)
- return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString)
+ return fmt.Errorf("opening /proc/%s/oom_score_adj: %w", pidString, err)
}
defer oomScoreAdj.Close()
if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil {
fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err)
- return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", c.OOMScoreAdj, pidString)
+ return fmt.Errorf("writing \"%d\" to /proc/%s/oom_score_adj: %w", c.OOMScoreAdj, pidString, err)
}
}
// Run any additional setup that we want to do before the child starts running proper.
@@ -557,7 +557,7 @@ func ExecRunnable(cmd Runnable, cleanup func()) {
os.Exit(status)
}
if err := cmd.Run(); err != nil {
- if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
+ if exitError, ok := err.(*exec.ExitError); ok {
if exitError.ProcessState.Exited() {
if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
if waitStatus.Exited() {
@@ -583,7 +583,7 @@ func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
var mappings []specs.LinuxIDMapping
f, err := os.Open(path)
if err != nil {
- return nil, errors.Wrapf(err, "Reading ID mappings from %q", path)
+ return nil, fmt.Errorf("reading ID mappings from %q: %w", path, err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
@@ -591,19 +591,19 @@ func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
line := scanner.Text()
fields := strings.Fields(line)
if len(fields) != 3 {
- return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
+ return nil, fmt.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
}
cid, err := strconv.ParseUint(fields[0], 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
+ return nil, fmt.Errorf("parsing container ID value %q from line %q in %q: %w", fields[0], line, path, err)
}
hid, err := strconv.ParseUint(fields[1], 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
+ return nil, fmt.Errorf("parsing host ID value %q from line %q in %q: %w", fields[1], line, path, err)
}
size, err := strconv.ParseUint(fields[2], 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
+ return nil, fmt.Errorf("parsing size value %q from line %q in %q: %w", fields[2], line, path, err)
}
mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
}
@@ -631,7 +631,7 @@ func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMappi
func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
mappings, err := idtools.NewIDMappings(user, group)
if err != nil {
- return nil, nil, errors.Wrapf(err, "Reading subuid mappings for user %q and subgid mappings for group %q", user, group)
+ return nil, nil, fmt.Errorf("reading subuid mappings for user %q and subgid mappings for group %q: %w", user, group, err)
}
var uidmap, gidmap []specs.LinuxIDMapping
for _, m := range mappings.UIDs() {
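For context on the getHostIDMappings hunk: /proc/&lt;pid&gt;/uid_map and gid_map hold one mapping per line as three whitespace-separated integers, container ID, host ID, and range size, which is exactly what the three ParseUint calls decode. A minimal standalone reader of the same format (Linux-only, illustrative rather than the vendored code):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	f, err := os.Open("/proc/self/uid_map")
	if err != nil {
		fmt.Println(fmt.Errorf("reading ID mappings: %w", err))
		return
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) != 3 {
			continue // malformed line; the vendored code returns an error here
		}
		cid, _ := strconv.ParseUint(fields[0], 10, 32)
		hid, _ := strconv.ParseUint(fields[1], 10, 32)
		size, _ := strconv.ParseUint(fields[2], 10, 32)
		fmt.Printf("container %d -> host %d (size %d)\n", cid, hid, size)
	}
}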
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 4b074eea2..8297d3c2c 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -2,6 +2,7 @@ package storage
import (
"encoding/base64"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -27,7 +28,6 @@ import (
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
)
type updateNameOperation int
@@ -643,8 +643,12 @@ type store struct {
// return
// }
func GetStore(options types.StoreOptions) (Store, error) {
+ defaultOpts, err := types.Options()
+ if err != nil {
+ return nil, err
+ }
if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
- options = types.Options()
+ options = defaultOpts
}
if options.GraphRoot != "" {
@@ -675,11 +679,11 @@ func GetStore(options types.StoreOptions) (Store, error) {
// if passed a run-root or graph-root alone, the other should be defaulted only error if we have neither.
switch {
case options.RunRoot == "" && options.GraphRoot == "":
- return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot or graphroot specified")
+ return nil, fmt.Errorf("no storage runroot or graphroot specified: %w", ErrIncompleteOptions)
case options.GraphRoot == "":
- options.GraphRoot = types.Options().GraphRoot
+ options.GraphRoot = defaultOpts.GraphRoot
case options.RunRoot == "":
- options.RunRoot = types.Options().RunRoot
+ options.RunRoot = defaultOpts.RunRoot
}
if err := os.MkdirAll(options.RunRoot, 0700); err != nil {
@@ -1244,13 +1248,13 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
layerOptions.TemplateLayer = layer.ID
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error creating an ID-mapped copy of layer %q", layer.ID)
+ return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
}
if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
- err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2))
+ err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err)
}
- return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID)
+ return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err)
}
layer = mappedLayer
}
@@ -1326,7 +1330,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
}
}
if cimage == nil {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", image)
+ return nil, fmt.Errorf("locating image with ID %q: %w", image, ErrImageUnknown)
}
imageID = cimage.ID
}
@@ -1399,7 +1403,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
mlabel, _ := options.Flags["MountLabel"].(string)
if (plabel == "" && mlabel != "") ||
(plabel != "" && mlabel == "") {
- return nil, errors.Errorf("ProcessLabel and Mountlabel must either not be specified or both specified")
+ return nil, errors.New("processLabel and Mountlabel must either not be specified or both specified")
}
if plabel == "" {
@@ -1557,7 +1561,7 @@ func (s *store) ListImageBigData(id string) ([]string, error) {
return bigDataNames, err
}
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (s *store) ImageBigDataSize(id, key string) (int64, error) {
@@ -1635,9 +1639,9 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) {
}
}
if foundImage {
- return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q (consider removing the image to resolve the issue)", key, id)
+ return nil, fmt.Errorf("locating item named %q for image with ID %q (consider removing the image to resolve the issue): %w", key, id, os.ErrNotExist)
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
// ListLayerBigData retrieves a list of the (possibly large) chunks of
@@ -1668,9 +1672,9 @@ func (s *store) ListLayerBigData(id string) ([]string, error) {
}
}
if foundLayer {
- return nil, errors.Wrapf(os.ErrNotExist, "error locating big data for layer with ID %q", id)
+ return nil, fmt.Errorf("locating big data for layer with ID %q: %w", id, os.ErrNotExist)
}
- return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+ return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown)
}
// LayerBigData retrieves a (possibly large) chunk of named data
@@ -1701,9 +1705,9 @@ func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) {
}
}
if foundLayer {
- return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for layer with ID %q", key, id)
+ return nil, fmt.Errorf("locating item named %q for layer with ID %q: %w", key, id, os.ErrNotExist)
}
- return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+ return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown)
}
// SetLayerBigData stores a (possibly large) chunk of named data
@@ -1742,11 +1746,11 @@ func (s *store) ImageSize(id string) (int64, error) {
lstore, err := s.LayerStore()
if err != nil {
- return -1, errors.Wrapf(err, "error loading primary layer store data")
+ return -1, fmt.Errorf("loading primary layer store data: %w", err)
}
lstores, err := s.ROLayerStores()
if err != nil {
- return -1, errors.Wrapf(err, "error loading additional layer stores")
+ return -1, fmt.Errorf("loading additional layer stores: %w", err)
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
@@ -1760,11 +1764,11 @@ func (s *store) ImageSize(id string) (int64, error) {
var imageStore ROBigDataStore
istore, err := s.ImageStore()
if err != nil {
- return -1, errors.Wrapf(err, "error loading primary image store data")
+ return -1, fmt.Errorf("loading primary image store data: %w", err)
}
istores, err := s.ROImageStores()
if err != nil {
- return -1, errors.Wrapf(err, "error loading additional image stores")
+ return -1, fmt.Errorf("loading additional image stores: %w", err)
}
// Look for the image's record.
@@ -1781,7 +1785,7 @@ func (s *store) ImageSize(id string) (int64, error) {
}
}
if image == nil {
- return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
// Start with a list of the image's top layers, if it has any.
@@ -1811,7 +1815,7 @@ func (s *store) ImageSize(id string) (int64, error) {
}
}
if layer == nil {
- return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID)
+ return -1, fmt.Errorf("locating layer with ID %q: %w", layerID, ErrLayerUnknown)
}
// The UncompressedSize is only valid if there's a digest to go with it.
n := layer.UncompressedSize
@@ -1819,7 +1823,7 @@ func (s *store) ImageSize(id string) (int64, error) {
// Compute the size.
n, err = layerStore.DiffSize("", layer.ID)
if err != nil {
- return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID)
+ return -1, fmt.Errorf("size/digest of layer with ID %q could not be calculated: %w", layerID, err)
}
}
// Count this layer.
@@ -1834,12 +1838,12 @@ func (s *store) ImageSize(id string) (int64, error) {
// Count big data items.
names, err := imageStore.BigDataNames(id)
if err != nil {
- return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id)
+ return -1, fmt.Errorf("reading list of big data items for image %q: %w", id, err)
}
for _, name := range names {
n, err := imageStore.BigDataSize(id, name)
if err != nil {
- return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id)
+ return -1, fmt.Errorf("reading size of big data item %q for image %q: %w", name, id, err)
}
size += n
}
@@ -1899,24 +1903,24 @@ func (s *store) ContainerSize(id string) (int64, error) {
if layer, err = store.Get(container.LayerID); err == nil {
size, err = store.DiffSize("", layer.ID)
if err != nil {
- return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID)
+ return -1, fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err)
}
break
}
}
if layer == nil {
- return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID)
+ return -1, fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown)
}
// Count big data items.
names, err := rcstore.BigDataNames(id)
if err != nil {
- return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID)
+ return -1, fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err)
}
for _, name := range names {
n, err := rcstore.BigDataSize(id, name)
if err != nil {
- return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id)
+ return -1, fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err)
}
size += n
}
@@ -2334,7 +2338,7 @@ func (s *store) DeleteLayer(id string) error {
}
for _, layer := range layers {
if layer.Parent == id {
- return errors.Wrapf(ErrLayerHasChildren, "used by layer %v", layer.ID)
+ return fmt.Errorf("used by layer %v: %w", layer.ID, ErrLayerHasChildren)
}
}
images, err := ristore.Images()
@@ -2344,12 +2348,12 @@ func (s *store) DeleteLayer(id string) error {
for _, image := range images {
if image.TopLayer == id {
- return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID)
+ return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage)
}
if stringutils.InSlice(image.MappedTopLayers, id) {
// No write access to the image store, fail before the layer is deleted
if _, ok := ristore.(*imageStore); !ok {
- return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID)
+ return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage)
}
}
}
@@ -2359,11 +2363,11 @@ func (s *store) DeleteLayer(id string) error {
}
for _, container := range containers {
if container.LayerID == id {
- return errors.Wrapf(ErrLayerUsedByContainer, "layer %v used by container %v", id, container.ID)
+ return fmt.Errorf("layer %v used by container %v: %w", id, container.ID, ErrLayerUsedByContainer)
}
}
if err := rlstore.Delete(id); err != nil {
- return errors.Wrapf(err, "delete layer %v", id)
+ return fmt.Errorf("delete layer %v: %w", id, err)
}
// The check here is used to avoid iterating the images if we don't need to.
@@ -2372,7 +2376,7 @@ func (s *store) DeleteLayer(id string) error {
for _, image := range images {
if stringutils.InSlice(image.MappedTopLayers, id) {
if err = istore.removeMappedTopLayer(image.ID, id); err != nil {
- return errors.Wrapf(err, "remove mapped top layer %v from image %v", id, image.ID)
+ return fmt.Errorf("remove mapped top layer %v from image %v: %w", id, image.ID, err)
}
}
}
@@ -2427,7 +2431,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
aContainerByImage[container.ImageID] = container.ID
}
if container, ok := aContainerByImage[id]; ok {
- return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container)
+ return nil, fmt.Errorf("image used by %v: %w", container, ErrImageUsedByContainer)
}
images, err := ristore.Images()
if err != nil {
@@ -3045,7 +3049,7 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
}
storeLayers, err := m(store, d)
if err != nil {
- if errors.Cause(err) != ErrLayerUnknown {
+ if !errors.Is(err, ErrLayerUnknown) {
return nil, err
}
continue
@@ -3060,14 +3064,14 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
- return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d)
+ return nil, fmt.Errorf("looking for compressed layers matching digest %q: %w", d, err)
}
return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d)
}
func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
- return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d)
+ return nil, fmt.Errorf("looking for layers matching digest %q: %w", d, err)
}
return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
}
@@ -3347,7 +3351,7 @@ func (s *store) Image(id string) (*Image, error) {
return image, nil
}
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
@@ -3405,7 +3409,7 @@ func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
return nil, err
}
imageList, err := store.ByDigest(d)
- if err != nil && errors.Cause(err) != ErrImageUnknown {
+ if err != nil && !errors.Is(err, ErrImageUnknown) {
return nil, err
}
images = append(images, imageList...)
@@ -3601,7 +3605,7 @@ func (s *store) Shutdown(force bool) ([]string, error) {
}
}
if len(mounted) > 0 && err == nil {
- err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted")
+ err = fmt.Errorf("a layer is mounted: %w", ErrLayerUsedByContainer)
}
if err == nil {
err = s.graphDriver.Cleanup()
@@ -3715,7 +3719,10 @@ func ReloadConfigurationFile(configFile string, storeOptions *types.StoreOptions
// GetDefaultMountOptions returns the default mountoptions defined in container/storage
func GetDefaultMountOptions() ([]string, error) {
- defaultStoreOptions := types.Options()
+ defaultStoreOptions, err := types.Options()
+ if err != nil {
+ return nil, err
+ }
return GetMountOptions(defaultStoreOptions.GraphDriverName, defaultStoreOptions.GraphDriverOptions)
}
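The store.go changes follow from types.Options() growing an error return (see the types/options.go diff below): GetStore and GetDefaultMountOptions now resolve the defaults once up front and propagate any config-load failure instead of calling types.Options() at each use. A sketch of what a caller looks like after this change, assuming these vendored import paths:

package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/types"
)

func main() {
	// Options() now reports config-load failures instead of
	// swallowing them inside the sync.Once initializer.
	opts, err := types.Options()
	if err != nil {
		fmt.Println("loading default store options:", err)
		return
	}
	store, err := storage.GetStore(opts)
	if err != nil {
		fmt.Println("opening store:", err)
		return
	}
	fmt.Println("graph root:", store.GraphRoot())
}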
diff --git a/vendor/github.com/containers/storage/types/idmappings.go b/vendor/github.com/containers/storage/types/idmappings.go
index 82824ae2b..aabdf7a81 100644
--- a/vendor/github.com/containers/storage/types/idmappings.go
+++ b/vendor/github.com/containers/storage/types/idmappings.go
@@ -5,7 +5,6 @@ import (
"os"
"github.com/containers/storage/pkg/idtools"
- "github.com/pkg/errors"
)
// AutoUserNsOptions defines how to automatically create a user namespace.
@@ -77,18 +76,18 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri
if subUIDMap != "" && subGIDMap != "" {
mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
if err != nil {
- return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap)
+ return nil, fmt.Errorf("failed to create NewIDMappings for uidmap=%s gidmap=%s: %w", subUIDMap, subGIDMap, err)
}
options.UIDMap = mappings.UIDs()
options.GIDMap = mappings.GIDs()
}
parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID")
if err != nil {
- return nil, errors.Wrapf(err, "failed to create ParseUIDMap UID=%s", UIDMapSlice)
+ return nil, fmt.Errorf("failed to create ParseUIDMap UID=%s: %w", UIDMapSlice, err)
}
parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID")
if err != nil {
- return nil, errors.Wrapf(err, "failed to create ParseGIDMap GID=%s", UIDMapSlice)
+ return nil, fmt.Errorf("failed to create ParseGIDMap GID=%s: %w", UIDMapSlice, err)
}
options.UIDMap = append(options.UIDMap, parsedUIDMap...)
options.GIDMap = append(options.GIDMap, parsedGIDMap...)
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index a55bf62c3..5421c02da 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -1,6 +1,7 @@
package types
import (
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -32,29 +33,38 @@ const (
)
var (
- defaultStoreOptionsOnce sync.Once
+ defaultStoreOptionsOnce sync.Once
+ loadDefaultStoreOptionsErr error
)
-func loaddefaultStoreOptions() {
+func loadDefaultStoreOptions() {
defaultStoreOptions.RunRoot = defaultRunRoot
defaultStoreOptions.GraphRoot = defaultGraphRoot
defaultStoreOptions.GraphDriverName = ""
if path, ok := os.LookupEnv(storageConfEnv); ok {
defaultOverrideConfigFile = path
- }
-
- if _, err := os.Stat(defaultOverrideConfigFile); err == nil {
+ if err := ReloadConfigurationFileIfNeeded(path, &defaultStoreOptions); err != nil {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
+ } else if _, err := os.Stat(defaultOverrideConfigFile); err == nil {
// The DefaultConfigFile(rootless) function returns the path
// of the used storage.conf file, by returning defaultConfigFile
// If override exists containers/storage uses it by default.
defaultConfigFile = defaultOverrideConfigFile
- ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions)
+ if err := ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions); err != nil {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
} else {
if !os.IsNotExist(err) {
logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err)
}
- ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
+ if err := ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions); err != nil && !errors.Is(err, os.ErrNotExist) {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
}
// reload could set values to empty for run and graph root if config does not contains anything
if defaultStoreOptions.RunRoot == "" {
@@ -73,7 +83,10 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
defaultRootlessGraphRoot string
err error
)
- defaultStoreOptionsOnce.Do(loaddefaultStoreOptions)
+ defaultStoreOptionsOnce.Do(loadDefaultStoreOptions)
+ if loadDefaultStoreOptionsErr != nil {
+ return StoreOptions{}, loadDefaultStoreOptionsErr
+ }
storageOpts := defaultStoreOptions
if rootless && rootlessUID != 0 {
storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts)
@@ -218,7 +231,11 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
opts.GraphDriverName = overlayDriver
}
- if opts.GraphDriverName == overlayDriver {
+ // If the configuration file was explicitly set, then copy all the options
+ // present.
+ if defaultConfigFileSet {
+ opts.GraphDriverOptions = systemOpts.GraphDriverOptions
+ } else if opts.GraphDriverName == overlayDriver {
for _, o := range systemOpts.GraphDriverOptions {
if strings.Contains(o, "ignore_chown_errors") {
opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
@@ -251,40 +268,41 @@ var prevReloadConfig = struct {
}{}
// SetDefaultConfigFilePath sets the default configuration to the specified path
-func SetDefaultConfigFilePath(path string) {
+func SetDefaultConfigFilePath(path string) error {
defaultConfigFile = path
defaultConfigFileSet = true
- ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
+ return ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
}
-func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) {
+func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) error {
prevReloadConfig.mutex.Lock()
defer prevReloadConfig.mutex.Unlock()
fi, err := os.Stat(configFile)
if err != nil {
- if !os.IsNotExist(err) {
- fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
- }
- return
+ return err
}
mtime := fi.ModTime()
if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile {
*storeOptions = *prevReloadConfig.storeOptions
- return
+ return nil
}
- ReloadConfigurationFile(configFile, storeOptions)
+ if err := ReloadConfigurationFile(configFile, storeOptions); err != nil {
+ return err
+ }
- prevReloadConfig.storeOptions = storeOptions
+ cOptions := *storeOptions
+ prevReloadConfig.storeOptions = &cOptions
prevReloadConfig.mod = mtime
prevReloadConfig.configFile = configFile
+ return nil
}
// ReloadConfigurationFile parses the specified configuration file and overrides
// the configuration in storeOptions.
-func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
+func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) error {
config := new(TomlConfig)
meta, err := toml.DecodeFile(configFile, &config)
@@ -296,7 +314,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
} else {
if !os.IsNotExist(err) {
fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
- return
+ return err
}
}
@@ -359,7 +377,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup)
if err != nil {
fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
- return
+ return err
}
storeOptions.UIDMap = mappings.UIDs()
storeOptions.GIDMap = mappings.GIDs()
@@ -367,16 +385,15 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
if err != nil {
- fmt.Print(err)
- } else {
- storeOptions.UIDMap = uidmap
+ return err
}
gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
if err != nil {
- fmt.Print(err)
- } else {
- storeOptions.GIDMap = gidmap
+ return err
}
+
+ storeOptions.UIDMap = uidmap
+ storeOptions.GIDMap = gidmap
storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser
if config.Storage.Options.AutoUsernsMinSize > 0 {
storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize
@@ -398,11 +415,12 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" {
storeOptions.GraphDriverOptions = nil
}
+ return nil
}
-func Options() StoreOptions {
- defaultStoreOptionsOnce.Do(loaddefaultStoreOptions)
- return defaultStoreOptions
+func Options() (StoreOptions, error) {
+ defaultStoreOptionsOnce.Do(loadDefaultStoreOptions)
+ return defaultStoreOptions, loadDefaultStoreOptionsErr
}
// Save overwrites the tomlConfig in storage.conf with the given conf
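The sync.Once changes above use a common workaround for Once.Do taking a func() with no return value: the initializer stashes its failure in a package-level error, and every caller re-checks that error after the Do. A distilled sketch of the pattern, with illustrative names rather than the vendored ones:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var (
	loadOnce sync.Once
	loadErr  error
	defaults string
)

func loadDefaults() {
	// Stand-in for ReloadConfigurationFileIfNeeded failing.
	defaults = "built-in"
	loadErr = errors.New("could not read storage.conf")
}

// Options mirrors the new types.Options(): run the loader once,
// then surface the captured error on every subsequent call.
func Options() (string, error) {
	loadOnce.Do(loadDefaults)
	return defaults, loadErr
}

func main() {
	_, err := Options()
	fmt.Println(err) // same error on every call
}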
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index c7f0d0fad..88641d424 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -1,6 +1,7 @@
package types
import (
+ "errors"
"fmt"
"io/ioutil"
"os"
@@ -10,7 +11,6 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/system"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -22,7 +22,7 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
}
path = filepath.Join(path, "containers")
if err := os.MkdirAll(path, 0700); err != nil {
- return "", errors.Wrapf(err, "unable to make rootless runtime")
+ return "", fmt.Errorf("unable to make rootless runtime: %w", err)
}
return path, nil
}
@@ -132,7 +132,7 @@ func getRootlessDirInfo(rootlessUID int) (string, string, error) {
home := homedir.Get()
if home == "" {
- return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
+ return "", "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty: %w", err)
}
// runc doesn't like symlinks in the rootfs path, and at least
// on CoreOS /home is a symlink to /var/home, so resolve any symlink.
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
index 13ebd4021..e0e530275 100644
--- a/vendor/github.com/containers/storage/userns.go
+++ b/vendor/github.com/containers/storage/userns.go
@@ -1,6 +1,7 @@
package storage
import (
+ "fmt"
"os"
"os/user"
"path/filepath"
@@ -11,7 +12,6 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/containers/storage/types"
libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -164,7 +164,7 @@ outer:
}
continue outer
}
- return 0, errors.Errorf("cannot find layer %q", layerName)
+ return 0, fmt.Errorf("cannot find layer %q", layerName)
}
rlstore, err := s.LayerStore()
@@ -223,7 +223,7 @@ func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image) ([
availableUIDs, availableGIDs, err := s.getAvailableIDs()
if err != nil {
- return nil, nil, errors.Wrapf(err, "cannot read mappings")
+ return nil, nil, fmt.Errorf("cannot read mappings: %w", err)
}
// Look every container that is using a user namespace and store
@@ -259,7 +259,7 @@ func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image) ([
}
}
if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize {
- return nil, nil, errors.Errorf("the container needs a user namespace with size %q that is bigger than the maximum value allowed with userns=auto %q", size, s.autoNsMaxSize)
+ return nil, nil, fmt.Errorf("the container needs a user namespace with size %q that is bigger than the maximum value allowed with userns=auto %q", size, s.autoNsMaxSize)
}
}
diff --git a/vendor/github.com/coreos/go-systemd/activation/listeners.go b/vendor/github.com/coreos/go-systemd/activation/listeners.go
index bb5cc2311..3dbe2b087 100644
--- a/vendor/github.com/coreos/go-systemd/activation/listeners.go
+++ b/vendor/github.com/coreos/go-systemd/activation/listeners.go
@@ -67,7 +67,7 @@ func TLSListeners(tlsConfig *tls.Config) ([]net.Listener, error) {
return nil, err
}
- if tlsConfig != nil && err == nil {
+ if tlsConfig != nil {
for i, l := range listeners {
// Activate TLS only for TCP sockets
if l.Addr().Network() == "tcp" {
@@ -88,7 +88,7 @@ func TLSListenersWithNames(tlsConfig *tls.Config) (map[string][]net.Listener, er
return nil, err
}
- if tlsConfig != nil && err == nil {
+ if tlsConfig != nil {
for _, ll := range listeners {
// Activate TLS only for TCP sockets
for i, l := range ll {
diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go
new file mode 100644
index 000000000..60e82caa9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go
@@ -0,0 +1,524 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jsonpb
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/encoding/protojson"
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapJSONUnmarshalV2 = false
+
+// UnmarshalNext unmarshals the next JSON object from d into m.
+func UnmarshalNext(d *json.Decoder, m proto.Message) error {
+ return new(Unmarshaler).UnmarshalNext(d, m)
+}
+
+// Unmarshal unmarshals a JSON object from r into m.
+func Unmarshal(r io.Reader, m proto.Message) error {
+ return new(Unmarshaler).Unmarshal(r, m)
+}
+
+// UnmarshalString unmarshals a JSON object from s into m.
+func UnmarshalString(s string, m proto.Message) error {
+ return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
+}
+
+// Unmarshaler is a configurable object for converting from a JSON
+// representation to a protocol buffer object.
+type Unmarshaler struct {
+ // AllowUnknownFields specifies whether to allow messages to contain
+ // unknown JSON fields, as opposed to failing to unmarshal.
+ AllowUnknownFields bool
+
+ // AnyResolver is used to resolve the google.protobuf.Any well-known type.
+ // If unset, the global registry is used by default.
+ AnyResolver AnyResolver
+}
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
+// they are unmarshaled from JSON. Messages that implement this should also
+// implement JSONPBMarshaler so that the custom format can be produced.
+//
+// The JSON unmarshaling must follow the JSON to proto specification:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+//
+// Deprecated: Custom types should implement protobuf reflection instead.
+type JSONPBUnmarshaler interface {
+ UnmarshalJSONPB(*Unmarshaler, []byte) error
+}
+
+// Unmarshal unmarshals a JSON object from r into m.
+func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
+ return u.UnmarshalNext(json.NewDecoder(r), m)
+}
+
+// UnmarshalNext unmarshals the next JSON object from d into m.
+func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
+ if m == nil {
+ return errors.New("invalid nil message")
+ }
+
+ // Parse the next JSON object from the stream.
+ raw := json.RawMessage{}
+ if err := d.Decode(&raw); err != nil {
+ return err
+ }
+
+ // Check for custom unmarshalers first since they may not properly
+ // implement protobuf reflection that the logic below relies on.
+ if jsu, ok := m.(JSONPBUnmarshaler); ok {
+ return jsu.UnmarshalJSONPB(u, raw)
+ }
+
+ mr := proto.MessageReflect(m)
+
+ // NOTE: For historical reasons, a top-level null is treated as a noop.
+ // This is incorrect, but kept for compatibility.
+ if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
+ return nil
+ }
+
+ if wrapJSONUnmarshalV2 {
+ // NOTE: If input message is non-empty, we need to preserve merge semantics
+ // of the old jsonpb implementation. These semantics are not supported by
+ // the protobuf JSON specification.
+ isEmpty := true
+ mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
+ isEmpty = false // at least one iteration implies non-empty
+ return false
+ })
+ if !isEmpty {
+ // Perform unmarshaling into a newly allocated, empty message.
+ mr = mr.New()
+
+ // Use a defer to copy all unmarshaled fields into the original message.
+ dst := proto.MessageReflect(m)
+ defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ dst.Set(fd, v)
+ return true
+ })
+ }
+
+ // Unmarshal using the v2 JSON unmarshaler.
+ opts := protojson.UnmarshalOptions{
+ DiscardUnknown: u.AllowUnknownFields,
+ }
+ if u.AnyResolver != nil {
+ opts.Resolver = anyResolver{u.AnyResolver}
+ }
+ return opts.Unmarshal(raw, mr.Interface())
+ } else {
+ if err := u.unmarshalMessage(mr, raw); err != nil {
+ return err
+ }
+ return protoV2.CheckInitialized(mr.Interface())
+ }
+}
+
+func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
+ md := m.Descriptor()
+ fds := md.Fields()
+
+ if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
+ return jsu.UnmarshalJSONPB(u, in)
+ }
+
+ if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
+ return nil
+ }
+
+ switch wellKnownType(md.FullName()) {
+ case "Any":
+ var jsonObject map[string]json.RawMessage
+ if err := json.Unmarshal(in, &jsonObject); err != nil {
+ return err
+ }
+
+ rawTypeURL, ok := jsonObject["@type"]
+ if !ok {
+ return errors.New("Any JSON doesn't have '@type'")
+ }
+ typeURL, err := unquoteString(string(rawTypeURL))
+ if err != nil {
+ return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
+ }
+ m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))
+
+ var m2 protoreflect.Message
+ if u.AnyResolver != nil {
+ mi, err := u.AnyResolver.Resolve(typeURL)
+ if err != nil {
+ return err
+ }
+ m2 = proto.MessageReflect(mi)
+ } else {
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
+ if err != nil {
+ if err == protoregistry.NotFound {
+ return fmt.Errorf("could not resolve Any message type: %v", typeURL)
+ }
+ return err
+ }
+ m2 = mt.New()
+ }
+
+ if wellKnownType(m2.Descriptor().FullName()) != "" {
+ rawValue, ok := jsonObject["value"]
+ if !ok {
+ return errors.New("Any JSON doesn't have 'value'")
+ }
+ if err := u.unmarshalMessage(m2, rawValue); err != nil {
+ return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
+ }
+ } else {
+ delete(jsonObject, "@type")
+ rawJSON, err := json.Marshal(jsonObject)
+ if err != nil {
+ return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
+ }
+ if err = u.unmarshalMessage(m2, rawJSON); err != nil {
+ return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
+ }
+ }
+
+ rawWire, err := protoV2.Marshal(m2.Interface())
+ if err != nil {
+ return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
+ }
+ m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
+ return nil
+ case "BoolValue", "BytesValue", "StringValue",
+ "Int32Value", "UInt32Value", "FloatValue",
+ "Int64Value", "UInt64Value", "DoubleValue":
+ fd := fds.ByNumber(1)
+ v, err := u.unmarshalValue(m.NewField(fd), in, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ return nil
+ case "Duration":
+ v, err := unquoteString(string(in))
+ if err != nil {
+ return err
+ }
+ d, err := time.ParseDuration(v)
+ if err != nil {
+ return fmt.Errorf("bad Duration: %v", err)
+ }
+
+ sec := d.Nanoseconds() / 1e9
+ nsec := d.Nanoseconds() % 1e9
+ m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
+ m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
+ return nil
+ case "Timestamp":
+ v, err := unquoteString(string(in))
+ if err != nil {
+ return err
+ }
+ t, err := time.Parse(time.RFC3339Nano, v)
+ if err != nil {
+ return fmt.Errorf("bad Timestamp: %v", err)
+ }
+
+ sec := t.Unix()
+ nsec := t.Nanosecond()
+ m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
+ m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
+ return nil
+ case "Value":
+ switch {
+ case string(in) == "null":
+ m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
+ case string(in) == "true":
+ m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
+ case string(in) == "false":
+ m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
+ case hasPrefixAndSuffix('"', in, '"'):
+ s, err := unquoteString(string(in))
+ if err != nil {
+ return fmt.Errorf("unrecognized type for Value %q", in)
+ }
+ m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
+ case hasPrefixAndSuffix('[', in, ']'):
+ v := m.Mutable(fds.ByNumber(6))
+ return u.unmarshalMessage(v.Message(), in)
+ case hasPrefixAndSuffix('{', in, '}'):
+ v := m.Mutable(fds.ByNumber(5))
+ return u.unmarshalMessage(v.Message(), in)
+ default:
+ f, err := strconv.ParseFloat(string(in), 0)
+ if err != nil {
+ return fmt.Errorf("unrecognized type for Value %q", in)
+ }
+ m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
+ }
+ return nil
+ case "ListValue":
+ var jsonArray []json.RawMessage
+ if err := json.Unmarshal(in, &jsonArray); err != nil {
+ return fmt.Errorf("bad ListValue: %v", err)
+ }
+
+ lv := m.Mutable(fds.ByNumber(1)).List()
+ for _, raw := range jsonArray {
+ ve := lv.NewElement()
+ if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
+ return err
+ }
+ lv.Append(ve)
+ }
+ return nil
+ case "Struct":
+ var jsonObject map[string]json.RawMessage
+ if err := json.Unmarshal(in, &jsonObject); err != nil {
+ return fmt.Errorf("bad StructValue: %v", err)
+ }
+
+ mv := m.Mutable(fds.ByNumber(1)).Map()
+ for key, raw := range jsonObject {
+ kv := protoreflect.ValueOf(key).MapKey()
+ vv := mv.NewValue()
+ if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
+ return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
+ }
+ mv.Set(kv, vv)
+ }
+ return nil
+ }
+
+ var jsonObject map[string]json.RawMessage
+ if err := json.Unmarshal(in, &jsonObject); err != nil {
+ return err
+ }
+
+ // Handle known fields.
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ if fd.IsWeak() && fd.Message().IsPlaceholder() {
+ continue // weak reference is not linked in
+ }
+
+ // Search for any raw JSON value associated with this field.
+ var raw json.RawMessage
+ name := string(fd.Name())
+ if fd.Kind() == protoreflect.GroupKind {
+ name = string(fd.Message().Name())
+ }
+ if v, ok := jsonObject[name]; ok {
+ delete(jsonObject, name)
+ raw = v
+ }
+ name = string(fd.JSONName())
+ if v, ok := jsonObject[name]; ok {
+ delete(jsonObject, name)
+ raw = v
+ }
+
+ field := m.NewField(fd)
+ // Unmarshal the field value.
+ if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
+ continue
+ }
+ v, err := u.unmarshalValue(field, raw, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ }
+
+ // Handle extension fields.
+ for name, raw := range jsonObject {
+ if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
+ continue
+ }
+
+ // Resolve the extension field by name.
+ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
+ xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+ if xt == nil && isMessageSet(md) {
+ xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+ }
+ if xt == nil {
+ continue
+ }
+ delete(jsonObject, name)
+ fd := xt.TypeDescriptor()
+ if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+ return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
+ }
+
+ field := m.NewField(fd)
+ // Unmarshal the field value.
+ if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
+ continue
+ }
+ v, err := u.unmarshalValue(field, raw, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ }
+
+ if !u.AllowUnknownFields && len(jsonObject) > 0 {
+ for name := range jsonObject {
+ return fmt.Errorf("unknown field %q in %v", name, md.FullName())
+ }
+ }
+ return nil
+}
+
+func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
+ if md := fd.Message(); md != nil {
+ return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
+ }
+ return false
+}
+
+func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
+ if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
+ _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
+ return ok
+ }
+ return false
+}
+
+func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ switch {
+ case fd.IsList():
+ var jsonArray []json.RawMessage
+ if err := json.Unmarshal(in, &jsonArray); err != nil {
+ return v, err
+ }
+ lv := v.List()
+ for _, raw := range jsonArray {
+ ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(ve)
+ }
+ return v, nil
+ case fd.IsMap():
+ var jsonObject map[string]json.RawMessage
+ if err := json.Unmarshal(in, &jsonObject); err != nil {
+ return v, err
+ }
+ kfd := fd.MapKey()
+ vfd := fd.MapValue()
+ mv := v.Map()
+ for key, raw := range jsonObject {
+ var kv protoreflect.MapKey
+ if kfd.Kind() == protoreflect.StringKind {
+ kv = protoreflect.ValueOf(key).MapKey()
+ } else {
+ v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
+ if err != nil {
+ return v, err
+ }
+ kv = v.MapKey()
+ }
+
+ vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
+ if err != nil {
+ return v, err
+ }
+ mv.Set(kv, vv)
+ }
+ return v, nil
+ default:
+ return u.unmarshalSingularValue(v, in, fd)
+ }
+}
+
+var nonFinite = map[string]float64{
+ `"NaN"`: math.NaN(),
+ `"Infinity"`: math.Inf(+1),
+ `"-Infinity"`: math.Inf(-1),
+}
+
+func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ return unmarshalValue(in, new(bool))
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ return unmarshalValue(trimQuote(in), new(int32))
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return unmarshalValue(trimQuote(in), new(int64))
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ return unmarshalValue(trimQuote(in), new(uint32))
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return unmarshalValue(trimQuote(in), new(uint64))
+ case protoreflect.FloatKind:
+ if f, ok := nonFinite[string(in)]; ok {
+ return protoreflect.ValueOfFloat32(float32(f)), nil
+ }
+ return unmarshalValue(trimQuote(in), new(float32))
+ case protoreflect.DoubleKind:
+ if f, ok := nonFinite[string(in)]; ok {
+ return protoreflect.ValueOfFloat64(float64(f)), nil
+ }
+ return unmarshalValue(trimQuote(in), new(float64))
+ case protoreflect.StringKind:
+ return unmarshalValue(in, new(string))
+ case protoreflect.BytesKind:
+ return unmarshalValue(in, new([]byte))
+ case protoreflect.EnumKind:
+ if hasPrefixAndSuffix('"', in, '"') {
+ vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
+ if vd == nil {
+ return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
+ }
+ return protoreflect.ValueOfEnum(vd.Number()), nil
+ }
+ return unmarshalValue(in, new(protoreflect.EnumNumber))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ err := u.unmarshalMessage(v.Message(), in)
+ return v, err
+ default:
+ panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+ }
+}
+
+func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
+ err := json.Unmarshal(in, v)
+ return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
+}
+
+func unquoteString(in string) (out string, err error) {
+ err = json.Unmarshal([]byte(in), &out)
+ return out, err
+}
+
+func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
+ if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
+ return true
+ }
+ return false
+}
+
+// trimQuote is like unquoteString but simply strips surrounding quotes.
+// This is incorrect, but is behavior done by the legacy implementation.
+func trimQuote(in []byte) []byte {
+ if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
+ in = in[1 : len(in)-1]
+ }
+ return in
+}
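
The decode path above hand-maps every well-known type: wrapper messages delegate to field 1, Duration and Timestamp parse their quoted string forms into seconds/nanos pairs, and Value/ListValue/Struct recurse over raw JSON. A minimal sketch of driving this through the public Unmarshaler (assuming the vendored module paths in this change resolve; durationpb is just an illustrative target message):

package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// "1.5s" exercises the "Duration" case above: the quoted string is
	// parsed with time.ParseDuration and split into seconds and nanos.
	d := &durationpb.Duration{}
	u := jsonpb.Unmarshaler{}
	if err := u.Unmarshal(strings.NewReader(`"1.5s"`), d); err != nil {
		panic(err)
	}
	fmt.Println(d.Seconds, d.Nanos) // 1 500000000
}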
diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go
new file mode 100644
index 000000000..685c80a62
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go
@@ -0,0 +1,559 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jsonpb
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/encoding/protojson"
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapJSONMarshalV2 = false
+
+// Marshaler is a configurable object for marshaling protocol buffer messages
+// to the specified JSON representation.
+type Marshaler struct {
+ // OrigName specifies whether to use the original protobuf name for fields.
+ OrigName bool
+
+ // EnumsAsInts specifies whether to render enum values as integers,
+ // as opposed to string values.
+ EnumsAsInts bool
+
+ // EmitDefaults specifies whether to render fields with zero values.
+ EmitDefaults bool
+
+ // Indent controls whether the output is compact or not.
+ // If empty, the output is compact JSON. Otherwise, every JSON object
+ // entry and JSON array value will be on its own line.
+ // Each line will be preceded by repeated copies of Indent, where the
+ // number of copies is the current indentation depth.
+ Indent string
+
+ // AnyResolver is used to resolve the google.protobuf.Any well-known type.
+ // If unset, the global registry is used by default.
+ AnyResolver AnyResolver
+}
+
+// JSONPBMarshaler is implemented by protobuf messages that customize the
+// way they are marshaled to JSON. Messages that implement this should also
+// implement JSONPBUnmarshaler so that the custom format can be parsed.
+//
+// The JSON marshaling must follow the proto to JSON specification:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+//
+// Deprecated: Custom types should implement protobuf reflection instead.
+type JSONPBMarshaler interface {
+ MarshalJSONPB(*Marshaler) ([]byte, error)
+}
+
+// Marshal serializes a protobuf message as JSON into w.
+func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
+ b, err := jm.marshal(m)
+ if len(b) > 0 {
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// MarshalToString serializes a protobuf message as JSON in string form.
+func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
+ b, err := jm.marshal(m)
+ if err != nil {
+ return "", err
+ }
+ return string(b), nil
+}
+
+func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
+ v := reflect.ValueOf(m)
+ if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return nil, errors.New("Marshal called with nil")
+ }
+
+ // Check for custom marshalers first since they may not properly
+ // implement protobuf reflection that the logic below relies on.
+ if jsm, ok := m.(JSONPBMarshaler); ok {
+ return jsm.MarshalJSONPB(jm)
+ }
+
+ if wrapJSONMarshalV2 {
+ opts := protojson.MarshalOptions{
+ UseProtoNames: jm.OrigName,
+ UseEnumNumbers: jm.EnumsAsInts,
+ EmitUnpopulated: jm.EmitDefaults,
+ Indent: jm.Indent,
+ }
+ if jm.AnyResolver != nil {
+ opts.Resolver = anyResolver{jm.AnyResolver}
+ }
+ return opts.Marshal(proto.MessageReflect(m).Interface())
+ } else {
+ // Check for unpopulated required fields first.
+ m2 := proto.MessageReflect(m)
+ if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
+ return nil, err
+ }
+
+ w := jsonWriter{Marshaler: jm}
+ err := w.marshalMessage(m2, "", "")
+ return w.buf, err
+ }
+}
+
+type jsonWriter struct {
+ *Marshaler
+ buf []byte
+}
+
+func (w *jsonWriter) write(s string) {
+ w.buf = append(w.buf, s...)
+}
+
+func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
+ if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
+ b, err := jsm.MarshalJSONPB(w.Marshaler)
+ if err != nil {
+ return err
+ }
+ if typeURL != "" {
+ // we are marshaling this object to an Any type
+ var js map[string]*json.RawMessage
+ if err = json.Unmarshal(b, &js); err != nil {
+ return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
+ }
+ turl, err := json.Marshal(typeURL)
+ if err != nil {
+ return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
+ }
+ js["@type"] = (*json.RawMessage)(&turl)
+ if b, err = json.Marshal(js); err != nil {
+ return err
+ }
+ }
+ w.write(string(b))
+ return nil
+ }
+
+ md := m.Descriptor()
+ fds := md.Fields()
+
+ // Handle well-known types.
+ const secondInNanos = int64(time.Second / time.Nanosecond)
+ switch wellKnownType(md.FullName()) {
+ case "Any":
+ return w.marshalAny(m, indent)
+ case "BoolValue", "BytesValue", "StringValue",
+ "Int32Value", "UInt32Value", "FloatValue",
+ "Int64Value", "UInt64Value", "DoubleValue":
+ fd := fds.ByNumber(1)
+ return w.marshalValue(fd, m.Get(fd), indent)
+ case "Duration":
+ const maxSecondsInDuration = 315576000000
+ // "Generated output always contains 0, 3, 6, or 9 fractional digits,
+ // depending on required precision."
+ s := m.Get(fds.ByNumber(1)).Int()
+ ns := m.Get(fds.ByNumber(2)).Int()
+ if s < -maxSecondsInDuration || s > maxSecondsInDuration {
+ return fmt.Errorf("seconds out of range %v", s)
+ }
+ if ns <= -secondInNanos || ns >= secondInNanos {
+ return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
+ }
+ if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
+ return errors.New("signs of seconds and nanos do not match")
+ }
+ var sign string
+ if s < 0 || ns < 0 {
+ sign, s, ns = "-", -1*s, -1*ns
+ }
+ x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
+ w.write(fmt.Sprintf(`"%vs"`, x))
+ return nil
+ case "Timestamp":
+ // "RFC 3339, where generated output will always be Z-normalized
+ // and uses 0, 3, 6 or 9 fractional digits."
+ s := m.Get(fds.ByNumber(1)).Int()
+ ns := m.Get(fds.ByNumber(2)).Int()
+ if ns < 0 || ns >= secondInNanos {
+ return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
+ }
+ t := time.Unix(s, ns).UTC()
+ // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
+ x := t.Format("2006-01-02T15:04:05.000000000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
+ w.write(fmt.Sprintf(`"%vZ"`, x))
+ return nil
+ case "Value":
+		// JSON value, which is a null, number, string, bool, object, or array.
+ od := md.Oneofs().Get(0)
+ fd := m.WhichOneof(od)
+ if fd == nil {
+ return errors.New("nil Value")
+ }
+ return w.marshalValue(fd, m.Get(fd), indent)
+ case "Struct", "ListValue":
+ // JSON object or array.
+ fd := fds.ByNumber(1)
+ return w.marshalValue(fd, m.Get(fd), indent)
+ }
+
+ w.write("{")
+ if w.Indent != "" {
+ w.write("\n")
+ }
+
+ firstField := true
+ if typeURL != "" {
+ if err := w.marshalTypeURL(indent, typeURL); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil {
+ fd = m.WhichOneof(od)
+ i += od.Fields().Len()
+ if fd == nil {
+ continue
+ }
+ } else {
+ i++
+ }
+
+ v := m.Get(fd)
+
+ if !m.Has(fd) {
+ if !w.EmitDefaults || fd.ContainingOneof() != nil {
+ continue
+ }
+ if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
+ v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
+ }
+ }
+
+ if !firstField {
+ w.writeComma()
+ }
+ if err := w.marshalField(fd, v, indent); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ // Handle proto2 extensions.
+ if md.ExtensionRanges().Len() > 0 {
+		// Collect a sorted list of all extension descriptors and values.
+ type ext struct {
+ desc protoreflect.FieldDescriptor
+ val protoreflect.Value
+ }
+ var exts []ext
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ exts = append(exts, ext{fd, v})
+ }
+ return true
+ })
+ sort.Slice(exts, func(i, j int) bool {
+ return exts[i].desc.Number() < exts[j].desc.Number()
+ })
+
+ for _, ext := range exts {
+ if !firstField {
+ w.writeComma()
+ }
+ if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
+ return err
+ }
+ firstField = false
+ }
+ }
+
+ if w.Indent != "" {
+ w.write("\n")
+ w.write(indent)
+ }
+ w.write("}")
+ return nil
+}
+
+func (w *jsonWriter) writeComma() {
+ if w.Indent != "" {
+ w.write(",\n")
+ } else {
+ w.write(",")
+ }
+}
+
+func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
+ // "If the Any contains a value that has a special JSON mapping,
+ // it will be converted as follows: {"@type": xxx, "value": yyy}.
+ // Otherwise, the value will be converted into a JSON object,
+ // and the "@type" field will be inserted to indicate the actual data type."
+ md := m.Descriptor()
+ typeURL := m.Get(md.Fields().ByNumber(1)).String()
+ rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
+
+ var m2 protoreflect.Message
+ if w.AnyResolver != nil {
+ mi, err := w.AnyResolver.Resolve(typeURL)
+ if err != nil {
+ return err
+ }
+ m2 = proto.MessageReflect(mi)
+ } else {
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
+ if err != nil {
+ return err
+ }
+ m2 = mt.New()
+ }
+
+ if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
+ return err
+ }
+
+ if wellKnownType(m2.Descriptor().FullName()) == "" {
+ return w.marshalMessage(m2, indent, typeURL)
+ }
+
+ w.write("{")
+ if w.Indent != "" {
+ w.write("\n")
+ }
+ if err := w.marshalTypeURL(indent, typeURL); err != nil {
+ return err
+ }
+ w.writeComma()
+ if w.Indent != "" {
+ w.write(indent)
+ w.write(w.Indent)
+ w.write(`"value": `)
+ } else {
+ w.write(`"value":`)
+ }
+ if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
+ return err
+ }
+ if w.Indent != "" {
+ w.write("\n")
+ w.write(indent)
+ }
+ w.write("}")
+ return nil
+}
+
+func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
+ if w.Indent != "" {
+ w.write(indent)
+ w.write(w.Indent)
+ }
+ w.write(`"@type":`)
+ if w.Indent != "" {
+ w.write(" ")
+ }
+ b, err := json.Marshal(typeURL)
+ if err != nil {
+ return err
+ }
+ w.write(string(b))
+ return nil
+}
+
+// marshalField writes field description and value to the Writer.
+func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+ if w.Indent != "" {
+ w.write(indent)
+ w.write(w.Indent)
+ }
+ w.write(`"`)
+ switch {
+ case fd.IsExtension():
+		// For a message set, use the full name of the containing message as the extension name.
+ name := string(fd.FullName())
+ if isMessageSet(fd.ContainingMessage()) {
+ name = strings.TrimSuffix(name, ".message_set_extension")
+ }
+
+ w.write("[" + name + "]")
+ case w.OrigName:
+ name := string(fd.Name())
+ if fd.Kind() == protoreflect.GroupKind {
+ name = string(fd.Message().Name())
+ }
+ w.write(name)
+ default:
+ w.write(string(fd.JSONName()))
+ }
+ w.write(`":`)
+ if w.Indent != "" {
+ w.write(" ")
+ }
+ return w.marshalValue(fd, v, indent)
+}
+
+func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+ switch {
+ case fd.IsList():
+ w.write("[")
+ comma := ""
+ lv := v.List()
+ for i := 0; i < lv.Len(); i++ {
+ w.write(comma)
+ if w.Indent != "" {
+ w.write("\n")
+ w.write(indent)
+ w.write(w.Indent)
+ w.write(w.Indent)
+ }
+ if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
+ return err
+ }
+ comma = ","
+ }
+ if w.Indent != "" {
+ w.write("\n")
+ w.write(indent)
+ w.write(w.Indent)
+ }
+ w.write("]")
+ return nil
+ case fd.IsMap():
+ kfd := fd.MapKey()
+ vfd := fd.MapValue()
+ mv := v.Map()
+
+ // Collect a sorted list of all map keys and values.
+ type entry struct{ key, val protoreflect.Value }
+ var entries []entry
+ mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ entries = append(entries, entry{k.Value(), v})
+ return true
+ })
+ sort.Slice(entries, func(i, j int) bool {
+ switch kfd.Kind() {
+ case protoreflect.BoolKind:
+ return !entries[i].key.Bool() && entries[j].key.Bool()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return entries[i].key.Int() < entries[j].key.Int()
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return entries[i].key.Uint() < entries[j].key.Uint()
+ case protoreflect.StringKind:
+ return entries[i].key.String() < entries[j].key.String()
+ default:
+ panic("invalid kind")
+ }
+ })
+
+ w.write(`{`)
+ comma := ""
+ for _, entry := range entries {
+ w.write(comma)
+ if w.Indent != "" {
+ w.write("\n")
+ w.write(indent)
+ w.write(w.Indent)
+ w.write(w.Indent)
+ }
+
+ s := fmt.Sprint(entry.key.Interface())
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ w.write(string(b))
+
+ w.write(`:`)
+ if w.Indent != "" {
+ w.write(` `)
+ }
+
+ if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
+ return err
+ }
+ comma = ","
+ }
+ if w.Indent != "" {
+ w.write("\n")
+ w.write(indent)
+ w.write(w.Indent)
+ }
+ w.write(`}`)
+ return nil
+ default:
+ return w.marshalSingularValue(fd, v, indent)
+ }
+}
+
+func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+ switch {
+ case !v.IsValid():
+ w.write("null")
+ return nil
+ case fd.Message() != nil:
+ return w.marshalMessage(v.Message(), indent+w.Indent, "")
+ case fd.Enum() != nil:
+ if fd.Enum().FullName() == "google.protobuf.NullValue" {
+ w.write("null")
+ return nil
+ }
+
+ vd := fd.Enum().Values().ByNumber(v.Enum())
+ if vd == nil || w.EnumsAsInts {
+ w.write(strconv.Itoa(int(v.Enum())))
+ } else {
+ w.write(`"` + string(vd.Name()) + `"`)
+ }
+ return nil
+ default:
+ switch v.Interface().(type) {
+ case float32, float64:
+ switch {
+ case math.IsInf(v.Float(), +1):
+ w.write(`"Infinity"`)
+ return nil
+ case math.IsInf(v.Float(), -1):
+ w.write(`"-Infinity"`)
+ return nil
+ case math.IsNaN(v.Float()):
+ w.write(`"NaN"`)
+ return nil
+ }
+ case int64, uint64:
+ w.write(fmt.Sprintf(`"%d"`, v.Interface()))
+ return nil
+ }
+
+ b, err := json.Marshal(v.Interface())
+ if err != nil {
+ return err
+ }
+ w.write(string(b))
+ return nil
+ }
+}
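
encode.go mirrors that mapping on output: Duration and Timestamp are rendered with 0, 3, 6, or 9 fractional digits, map keys are emitted in sorted order, and 64-bit integers are quoted. A small, hedged usage sketch of the Marshaler options (same assumed module paths as above):

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/jsonpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Exercises the "Duration" branch of marshalMessage: trailing zero
	// groups are trimmed, so 90s prints with no fractional digits.
	m := &jsonpb.Marshaler{Indent: "  ", OrigName: true}
	s, err := m.MarshalToString(durationpb.New(90 * time.Second))
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // "90s"
}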
diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go
new file mode 100644
index 000000000..480e2448d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/json.go
@@ -0,0 +1,69 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jsonpb provides functionality to marshal and unmarshal between a
+// protocol buffer message and JSON. It follows the specification at
+// https://developers.google.com/protocol-buffers/docs/proto3#json.
+//
+// Do not rely on the default behavior of the standard encoding/json package
+// when called on generated message types as it does not operate correctly.
+//
+// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
+// package instead.
+package jsonpb
+
+import (
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// AnyResolver takes a type URL, present in an Any message,
+// and resolves it into an instance of the associated message.
+type AnyResolver interface {
+ Resolve(typeURL string) (proto.Message, error)
+}
+
+type anyResolver struct{ AnyResolver }
+
+func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
+ return r.FindMessageByURL(string(message))
+}
+
+func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+ m, err := r.Resolve(url)
+ if err != nil {
+ return nil, err
+ }
+ return protoimpl.X.MessageTypeOf(m), nil
+}
+
+func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ return protoregistry.GlobalTypes.FindExtensionByName(field)
+}
+
+func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+}
+
+func wellKnownType(s protoreflect.FullName) string {
+ if s.Parent() == "google.protobuf" {
+ switch s.Name() {
+ case "Empty", "Any",
+ "BoolValue", "BytesValue", "StringValue",
+ "Int32Value", "UInt32Value", "FloatValue",
+ "Int64Value", "UInt64Value", "DoubleValue",
+ "Duration", "Timestamp",
+ "NullValue", "Struct", "Value", "ListValue":
+ return string(s.Name())
+ }
+ }
+ return ""
+}
+
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+ ms, ok := md.(interface{ IsMessageSet() bool })
+ return ok && ms.IsMessageSet()
+}
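
A custom AnyResolver replaces the global-registry lookup that marshalAny and the Any decode case fall back to. A sketch of a static resolver; the type URL and prototype message here are placeholders:

package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

// staticResolver resolves a fixed set of type URLs to prototype messages.
type staticResolver map[string]proto.Message

func (r staticResolver) Resolve(typeURL string) (proto.Message, error) {
	m, ok := r[typeURL]
	if !ok {
		return nil, fmt.Errorf("unknown type URL %q", typeURL)
	}
	// Clone so callers get a fresh message to unmarshal into.
	return proto.Clone(m), nil
}

func main() {
	u := jsonpb.Unmarshaler{AnyResolver: staticResolver{
		"type.googleapis.com/google.protobuf.Duration": &durationpb.Duration{},
	}}
	_ = u // pass to u.Unmarshal as usual
}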
diff --git a/vendor/github.com/google/go-containerregistry/LICENSE b/vendor/github.com/google/go-containerregistry/LICENSE
new file mode 100644
index 000000000..7a4a3ea24
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/README.md b/vendor/github.com/google/go-containerregistry/pkg/name/README.md
new file mode 100644
index 000000000..4889b8446
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/README.md
@@ -0,0 +1,3 @@
+# `name`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/name?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/name)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/check.go b/vendor/github.com/google/go-containerregistry/pkg/name/check.go
new file mode 100644
index 000000000..e9a240a3e
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/check.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+// stripRunesFn returns a function which returns -1 (i.e. a value which
+// signals deletion in strings.Map) for runes in 'runes', and the rune otherwise.
+func stripRunesFn(runes string) func(rune) rune {
+ return func(r rune) rune {
+ if strings.ContainsRune(runes, r) {
+ return -1
+ }
+ return r
+ }
+}
+
+// checkElement checks that a given named element matches character and length restrictions.
+// It returns nil if the element adheres to them, and an ErrBadName error otherwise.
+func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error {
+ numRunes := utf8.RuneCountInString(element)
+ if (numRunes < minRunes) || (maxRunes < numRunes) {
+ return newErrBadName("%s must be between %d and %d characters in length: %s", name, minRunes, maxRunes, element)
+ } else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 {
+ return newErrBadName("%s can only contain the characters `%s`: %s", name, allowedRunes, element)
+ }
+ return nil
+}
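
The validation trick is strings.Map with a mapping function that returns -1: those runes are deleted, so any characters that survive stripping the allowed set are illegal. The idiom in isolation, stdlib only:

package main

import (
	"fmt"
	"strings"
)

func main() {
	allowed := "abc"
	strip := func(r rune) rune {
		if strings.ContainsRune(allowed, r) {
			return -1 // -1 tells strings.Map to drop the rune
		}
		return r
	}
	fmt.Println(strings.Map(strip, "abcabc")) // "" -> all runes allowed
	fmt.Println(strings.Map(strip, "abXc"))   // "X" -> the illegal residue
}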
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
new file mode 100644
index 000000000..e465aef49
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
@@ -0,0 +1,96 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "strings"
+)
+
+const (
+ // These have the form: sha256:<hex string>
+ // TODO(dekkagaijin): replace with opencontainers/go-digest or docker/distribution's validation.
+ digestChars = "sh:0123456789abcdef"
+ digestDelim = "@"
+)
+
+// Digest stores a digest name in a structured form.
+type Digest struct {
+ Repository
+ digest string
+ original string
+}
+
+// Ensure Digest implements Reference
+var _ Reference = (*Digest)(nil)
+
+// Context implements Reference.
+func (d Digest) Context() Repository {
+ return d.Repository
+}
+
+// Identifier implements Reference.
+func (d Digest) Identifier() string {
+ return d.DigestStr()
+}
+
+// DigestStr returns the digest component of the Digest.
+func (d Digest) DigestStr() string {
+ return d.digest
+}
+
+// Name returns the name from which the Digest was derived.
+func (d Digest) Name() string {
+ return d.Repository.Name() + digestDelim + d.DigestStr()
+}
+
+// String returns the original input string.
+func (d Digest) String() string {
+ return d.original
+}
+
+func checkDigest(name string) error {
+ return checkElement("digest", name, digestChars, 7+64, 7+64)
+}
+
+// NewDigest returns a new Digest representing the given name.
+func NewDigest(name string, opts ...Option) (Digest, error) {
+ // Split on "@"
+ parts := strings.Split(name, digestDelim)
+ if len(parts) != 2 {
+ return Digest{}, newErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name)
+ }
+ base := parts[0]
+ digest := parts[1]
+
+ // Always check that the digest is valid.
+ if err := checkDigest(digest); err != nil {
+ return Digest{}, err
+ }
+
+ tag, err := NewTag(base, opts...)
+ if err == nil {
+ base = tag.Repository.Name()
+ }
+
+ repo, err := NewRepository(base, opts...)
+ if err != nil {
+ return Digest{}, err
+ }
+ return Digest{
+ Repository: repo,
+ digest: digest,
+ original: name,
+ }, nil
+}
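
A usage sketch of NewDigest; the digest value is a placeholder, since checkDigest enforces a 71-character string over the digest alphabet, i.e. "sha256:" plus 64 hex digits:

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	ref := "gcr.io/example/repo@sha256:" + strings.Repeat("a", 64)
	d, err := name.NewDigest(ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Context().Name()) // gcr.io/example/repo
	fmt.Println(d.DigestStr())      // sha256:aaaa...
}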
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/doc.go b/vendor/github.com/google/go-containerregistry/pkg/name/doc.go
new file mode 100644
index 000000000..b294794dc
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/doc.go
@@ -0,0 +1,42 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package name defines structured types for representing image references.
+//
+// What's in a name? For image references, not nearly enough!
+//
+// Image references look a lot like URLs, but they differ in that they don't
+// contain the scheme (http or https), they can end with a :tag or a @digest
+// (the latter being validated), and they perform defaulting for missing
+// components.
+//
+// Since image references don't contain the scheme, we do our best to infer
+// if we use http or https from the given hostname. We allow http fallback for
+// any host that looks like localhost (localhost, 127.0.0.1, ::1), ends in
+// ".local", or is in the "private" address space per RFC 1918. For everything
+// else, we assume https only. To override this heuristic, use the Insecure
+// option.
+//
+// Image references with a digest signal to us that we should verify the content
+// of the image matches the digest. E.g. when pulling a Digest reference, we'll
+// calculate the sha256 of the manifest returned by the registry and error out
+// if it doesn't match what we asked for.
+//
+// For defaulting, we interpret "ubuntu" as
+// "index.docker.io/library/ubuntu:latest" because we add the missing repo
+// "library", the missing registry "index.docker.io", and the missing tag
+// "latest". To disable this defaulting, use the StrictValidation option. This
+// is useful e.g. to only allow image references that explicitly set a tag or
+// digest, so that you don't accidentally pull "latest".
+package name
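
The defaulting described above, in one short sketch:

package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	r, err := name.ParseReference("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Name()) // index.docker.io/library/ubuntu:latest

	// Strict validation rejects the same shorthand.
	_, err = name.ParseReference("ubuntu", name.StrictValidation)
	fmt.Println(err != nil) // true
}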
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/errors.go b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go
new file mode 100644
index 000000000..035e35069
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go
@@ -0,0 +1,42 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrBadName is an error for when a bad docker name is supplied.
+type ErrBadName struct {
+ info string
+}
+
+func (e *ErrBadName) Error() string {
+ return e.info
+}
+
+// newErrBadName returns an ErrBadName whose Error() returns the given formatted string.
+func newErrBadName(fmtStr string, args ...interface{}) *ErrBadName {
+ return &ErrBadName{fmt.Sprintf(fmtStr, args...)}
+}
+
+// IsErrBadName returns true if the given error is an ErrBadName.
+//
+// Deprecated: Use errors.Is.
+func IsErrBadName(err error) bool {
+ var berr *ErrBadName
+ return errors.As(err, &berr)
+}
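
Matching the deprecation note on IsErrBadName, callers are expected to use errors.Is or errors.As directly:

package main

import (
	"errors"
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	_, err := name.NewTag("bad repo name") // spaces are not in repositoryChars
	var bad *name.ErrBadName
	if errors.As(err, &bad) {
		fmt.Println("invalid reference:", bad)
	}
}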
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/options.go b/vendor/github.com/google/go-containerregistry/pkg/name/options.go
new file mode 100644
index 000000000..d14fedcda
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/options.go
@@ -0,0 +1,83 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+const (
+ // DefaultRegistry is the registry name that will be used if no registry
+ // provided and the default is not overridden.
+ DefaultRegistry = "index.docker.io"
+ defaultRegistryAlias = "docker.io"
+
+ // DefaultTag is the tag name that will be used if no tag provided and the
+ // default is not overridden.
+ DefaultTag = "latest"
+)
+
+type options struct {
+ strict bool // weak by default
+ insecure bool // secure by default
+ defaultRegistry string
+ defaultTag string
+}
+
+func makeOptions(opts ...Option) options {
+ opt := options{
+ defaultRegistry: DefaultRegistry,
+ defaultTag: DefaultTag,
+ }
+ for _, o := range opts {
+ o(&opt)
+ }
+ return opt
+}
+
+// Option is a functional option for name parsing.
+type Option func(*options)
+
+// StrictValidation is an Option that requires image references to be fully
+// specified; i.e. no defaulting for registry (dockerhub), repo (library),
+// or tag (latest).
+func StrictValidation(opts *options) {
+ opts.strict = true
+}
+
+// WeakValidation is an Option that sets defaults when parsing names, see
+// StrictValidation.
+func WeakValidation(opts *options) {
+ opts.strict = false
+}
+
+// Insecure is an Option that allows image references to be fetched without TLS.
+func Insecure(opts *options) {
+ opts.insecure = true
+}
+
+// OptionFn is a function that returns an option.
+type OptionFn func() Option
+
+// WithDefaultRegistry sets the default registry that will be used if one is not
+// provided.
+func WithDefaultRegistry(r string) Option {
+ return func(opts *options) {
+ opts.defaultRegistry = r
+ }
+}
+
+// WithDefaultTag sets the default tag that will be used if one is not provided.
+func WithDefaultTag(t string) Option {
+ return func(opts *options) {
+ opts.defaultTag = t
+ }
+}
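
The value-carrying options compose with any of the parsers; a sketch with placeholder registry and tag values:

package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	t, err := name.NewTag("team/app",
		name.WithDefaultRegistry("registry.example.com"),
		name.WithDefaultTag("stable"))
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Name()) // registry.example.com/team/app:stable
}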
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go
new file mode 100644
index 000000000..955c59a7b
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go
@@ -0,0 +1,75 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "fmt"
+)
+
+// Reference defines the interface that consumers use when they can
+// take either a tag or a digest.
+type Reference interface {
+ fmt.Stringer
+
+ // Context accesses the Repository context of the reference.
+ Context() Repository
+
+ // Identifier accesses the type-specific portion of the reference.
+ Identifier() string
+
+ // Name is the fully-qualified reference name.
+ Name() string
+
+ // Scope is the scope needed to access this reference.
+ Scope(string) string
+}
+
+// ParseReference parses the string as a reference, either by tag or digest.
+func ParseReference(s string, opts ...Option) (Reference, error) {
+ if t, err := NewTag(s, opts...); err == nil {
+ return t, nil
+ }
+ if d, err := NewDigest(s, opts...); err == nil {
+ return d, nil
+ }
+ return nil, newErrBadName("could not parse reference: " + s)
+}
+
+type stringConst string
+
+// MustParseReference behaves like ParseReference, but panics instead of
+// returning an error. It's intended for use in tests, or when a value is
+// expected to be valid at code authoring time.
+//
+// To discourage its use in scenarios where the value is not known at code
+// authoring time, it must be passed a string constant:
+//
+// const str = "valid/string"
+// MustParseReference(str)
+// MustParseReference("another/valid/string")
+// MustParseReference(str + "/and/more")
+//
+// These will not compile:
+//
+// var str = "valid/string"
+// MustParseReference(str)
+// MustParseReference(strings.Join([]string{"valid", "string"}, "/"))
+func MustParseReference(s stringConst, opts ...Option) Reference {
+ ref, err := ParseReference(string(s), opts...)
+ if err != nil {
+ panic(err)
+ }
+ return ref
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go
new file mode 100644
index 000000000..2a26b66d0
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go
@@ -0,0 +1,136 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "net"
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+// Detect more complex forms of local references.
+var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`)
+
+// Detect the loopback IP (127.0.0.1)
+var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1"))
+
+// Detect the loopback IPV6 (::1)
+var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1"))
+
+// Registry stores a docker registry name in a structured form.
+type Registry struct {
+ insecure bool
+ registry string
+}
+
+// RegistryStr returns the registry component of the Registry.
+func (r Registry) RegistryStr() string {
+ return r.registry
+}
+
+// Name returns the name from which the Registry was derived.
+func (r Registry) Name() string {
+ return r.RegistryStr()
+}
+
+func (r Registry) String() string {
+ return r.Name()
+}
+
+// Scope returns the scope required to access the registry.
+func (r Registry) Scope(string) string {
+ // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z
+ return "registry:catalog:*"
+}
+
+func (r Registry) isRFC1918() bool {
+ ipStr := strings.Split(r.Name(), ":")[0]
+ ip := net.ParseIP(ipStr)
+ if ip == nil {
+ return false
+ }
+ for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} {
+ _, block, _ := net.ParseCIDR(cidr)
+ if block.Contains(ip) {
+ return true
+ }
+ }
+ return false
+}
+
+// Scheme returns "https" unless the registry looks local (localhost, *.local, loopback, RFC 1918) or was explicitly marked insecure, in which case it returns "http".
+func (r Registry) Scheme() string {
+ if r.insecure {
+ return "http"
+ }
+ if r.isRFC1918() {
+ return "http"
+ }
+ if strings.HasPrefix(r.Name(), "localhost:") {
+ return "http"
+ }
+ if reLocal.MatchString(r.Name()) {
+ return "http"
+ }
+ if reLoopback.MatchString(r.Name()) {
+ return "http"
+ }
+ if reipv6Loopback.MatchString(r.Name()) {
+ return "http"
+ }
+ return "https"
+}
+
+func checkRegistry(name string) error {
+ // Per RFC 3986, registries (authorities) are required to be prefixed with "//"
+ // url.Host == hostname[:port] == authority
+ if url, err := url.Parse("//" + name); err != nil || url.Host != name {
+ return newErrBadName("registries must be valid RFC 3986 URI authorities: %s", name)
+ }
+ return nil
+}
+
+// NewRegistry returns a Registry based on the given name.
+// Strict validation requires explicit, valid RFC 3986 URI authorities to be given.
+func NewRegistry(name string, opts ...Option) (Registry, error) {
+ opt := makeOptions(opts...)
+ if opt.strict && len(name) == 0 {
+ return Registry{}, newErrBadName("strict validation requires the registry to be explicitly defined")
+ }
+
+ if err := checkRegistry(name); err != nil {
+ return Registry{}, err
+ }
+
+ if name == "" {
+ name = opt.defaultRegistry
+ }
+ // Rewrite "docker.io" to "index.docker.io".
+ // See: https://github.com/google/go-containerregistry/issues/68
+ if name == defaultRegistryAlias {
+ name = DefaultRegistry
+ }
+
+ return Registry{registry: name, insecure: opt.insecure}, nil
+}
+
+// NewInsecureRegistry returns an Insecure Registry based on the given name.
+//
+// Deprecated: Use the Insecure Option with NewRegistry instead.
+func NewInsecureRegistry(name string, opts ...Option) (Registry, error) {
+ opts = append(opts, Insecure)
+ return NewRegistry(name, opts...)
+}
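
The Scheme heuristic is purely name-based (no network I/O); a quick demonstration:

package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	for _, n := range []string{"localhost:5000", "10.1.2.3", "gcr.io"} {
		r, err := name.NewRegistry(n)
		if err != nil {
			panic(err)
		}
		fmt.Println(n, "->", r.Scheme())
	}
	// localhost:5000 -> http
	// 10.1.2.3 -> http   (RFC 1918)
	// gcr.io -> https
}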
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go
new file mode 100644
index 000000000..9250e3625
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go
@@ -0,0 +1,121 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ defaultNamespace = "library"
+ repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./"
+ regRepoDelimiter = "/"
+)
+
+// Repository stores a docker repository name in a structured form.
+type Repository struct {
+ Registry
+ repository string
+}
+
+// See https://docs.docker.com/docker-hub/official_repos
+func hasImplicitNamespace(repo string, reg Registry) bool {
+ return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry
+}
+
+// RepositoryStr returns the repository component of the Repository.
+func (r Repository) RepositoryStr() string {
+ if hasImplicitNamespace(r.repository, r.Registry) {
+ return fmt.Sprintf("%s/%s", defaultNamespace, r.repository)
+ }
+ return r.repository
+}
+
+// Name returns the name from which the Repository was derived.
+func (r Repository) Name() string {
+ regName := r.Registry.Name()
+ if regName != "" {
+ return regName + regRepoDelimiter + r.RepositoryStr()
+ }
+ // TODO: As far as I can tell, this is unreachable.
+ return r.RepositoryStr()
+}
+
+func (r Repository) String() string {
+ return r.Name()
+}
+
+// Scope returns the scope required to perform the given action on the registry.
+// TODO(jonjohnsonjr): consider moving scopes to a separate package.
+func (r Repository) Scope(action string) string {
+ return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action)
+}
+
+func checkRepository(repository string) error {
+ return checkElement("repository", repository, repositoryChars, 2, 255)
+}
+
+// NewRepository returns a new Repository representing the given name, according to the given strictness.
+func NewRepository(name string, opts ...Option) (Repository, error) {
+ opt := makeOptions(opts...)
+ if len(name) == 0 {
+ return Repository{}, newErrBadName("a repository name must be specified")
+ }
+
+ var registry string
+ repo := name
+ parts := strings.SplitN(name, regRepoDelimiter, 2)
+ if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) {
+ // The first part of the repository is treated as the registry domain
+ // iff it contains a '.' or ':' character, otherwise it is all repository
+ // and the domain defaults to Docker Hub.
+ registry = parts[0]
+ repo = parts[1]
+ }
+
+ if err := checkRepository(repo); err != nil {
+ return Repository{}, err
+ }
+
+ reg, err := NewRegistry(registry, opts...)
+ if err != nil {
+ return Repository{}, err
+ }
+ if hasImplicitNamespace(repo, reg) && opt.strict {
+ return Repository{}, newErrBadName("strict validation requires the full repository path (missing 'library')")
+ }
+ return Repository{reg, repo}, nil
+}
+
+// Tag returns a Tag in this Repository.
+func (r Repository) Tag(identifier string) Tag {
+ t := Tag{
+ tag: identifier,
+ Repository: r,
+ }
+ t.original = t.Name()
+ return t
+}
+
+// Digest returns a Digest in this Repository.
+func (r Repository) Digest(identifier string) Digest {
+ d := Digest{
+ digest: identifier,
+ Repository: r,
+ }
+ d.original = d.Name()
+ return d
+}
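
Two behaviors in NewRepository are worth calling out: the first path component is treated as a registry host only when it contains '.' or ':', and single-segment Docker Hub names gain an implicit 'library' namespace. A sketch under the weak-validation defaults (StrictValidation comes from the package's options file, which is not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	repo, _ := name.NewRepository("ubuntu")
	fmt.Println(repo.Name())          // index.docker.io/library/ubuntu
	fmt.Println(repo.RepositoryStr()) // library/ubuntu

	// "registry.local:5000" contains ':', so it becomes the registry host.
	team, _ := name.NewRepository("registry.local:5000/team/app")
	fmt.Println(team.RegistryStr(), team.RepositoryStr()) // registry.local:5000 team/app

	// Strict validation refuses the implicit namespace.
	if _, err := name.NewRepository("ubuntu", name.StrictValidation); err != nil {
		fmt.Println(err) // strict validation requires the full repository path (missing 'library')
	}
}
```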
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/tag.go b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go
new file mode 100644
index 000000000..66bd1bec3
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go
@@ -0,0 +1,108 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "strings"
+)
+
+const (
+ // TODO(dekkagaijin): use the docker/distribution regexes for validation.
+ tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ tagDelim = ":"
+)
+
+// Tag stores a docker tag name in a structured form.
+type Tag struct {
+ Repository
+ tag string
+ original string
+}
+
+// Ensure Tag implements Reference
+var _ Reference = (*Tag)(nil)
+
+// Context implements Reference.
+func (t Tag) Context() Repository {
+ return t.Repository
+}
+
+// Identifier implements Reference.
+func (t Tag) Identifier() string {
+ return t.TagStr()
+}
+
+// TagStr returns the tag component of the Tag.
+func (t Tag) TagStr() string {
+ return t.tag
+}
+
+// Name returns the name from which the Tag was derived.
+func (t Tag) Name() string {
+ return t.Repository.Name() + tagDelim + t.TagStr()
+}
+
+// String returns the original input string.
+func (t Tag) String() string {
+ return t.original
+}
+
+// Scope returns the scope required to perform the given action on the tag.
+func (t Tag) Scope(action string) string {
+ return t.Repository.Scope(action)
+}
+
+func checkTag(name string) error {
+ return checkElement("tag", name, tagChars, 1, 128)
+}
+
+// NewTag returns a new Tag representing the given name, according to the given strictness.
+func NewTag(name string, opts ...Option) (Tag, error) {
+ opt := makeOptions(opts...)
+ base := name
+ tag := ""
+
+ // Split on ":"
+ parts := strings.Split(name, tagDelim)
+ // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation.
+ if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) {
+ base = strings.Join(parts[:len(parts)-1], tagDelim)
+ tag = parts[len(parts)-1]
+ }
+
+ // We don't require a tag, but if we get one check it's valid,
+ // even when not being strict.
+ // If we are being strict, we want to validate the tag regardless in case
+ // it's empty.
+ if tag != "" || opt.strict {
+ if err := checkTag(tag); err != nil {
+ return Tag{}, err
+ }
+ }
+
+ if tag == "" {
+ tag = opt.defaultTag
+ }
+
+ repo, err := NewRepository(base, opts...)
+ if err != nil {
+ return Tag{}, err
+ }
+ return Tag{
+ Repository: repo,
+ tag: tag,
+ original: name,
+ }, nil
+}
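
NewTag's one subtlety is the port-versus-tag disambiguation on the final ':'; a sketch (the default tag value is an assumption, coming from the options file outside this diff):

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// No tag given: the default tag is filled in (assumed to be "latest",
	// set in the package's options file), but String() keeps the input.
	t, _ := name.NewTag("gcr.io/project/app")
	fmt.Println(t.TagStr(), t.String()) // latest gcr.io/project/app

	// The last ':'-part is only a tag when it contains no '/', so a
	// registry port is never mistaken for a tag.
	p, _ := name.NewTag("registry.local:5000/app")
	fmt.Println(p.TagStr(), p.RegistryStr()) // latest registry.local:5000
}
```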
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 5c3c2a258..c7cf1a20c 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,6 +17,27 @@ This package provides various compression algorithms.
# changelog
+* June 29, 2022 (v1.15.7)
+
+ * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
+ * zip: Merge upstream https://github.com/klauspost/compress/pull/631
+ * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624
+ * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598
+ * flate: Faster histograms https://github.com/klauspost/compress/pull/620
+ * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622
+
+* June 3, 2022 (v1.15.6)
+ * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
+ * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
+ * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605
+ * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606
+ * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608
+ * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612
+ * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609
+ * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607
+ * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614
+ * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610
+
* May 25, 2022 (v1.15.5)
* s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
* s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index 25f6d1108..40ef45c2f 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -169,7 +169,7 @@ func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
b := w.offsetEncoding.codes
b = b[:len(a)]
for i, v := range a {
- if v != 0 && b[i].len == 0 {
+ if v != 0 && b[i].zero() {
return false
}
}
@@ -178,7 +178,7 @@ func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
b = w.literalEncoding.codes[256:literalCount]
b = b[:len(a)]
for i, v := range a {
- if v != 0 && b[i].len == 0 {
+ if v != 0 && b[i].zero() {
return false
}
}
@@ -186,7 +186,7 @@ func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
a = t.litHist[:256]
b = w.literalEncoding.codes[:len(a)]
for i, v := range a {
- if v != 0 && b[i].len == 0 {
+ if v != 0 && b[i].zero() {
return false
}
}
@@ -280,12 +280,12 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE
// Copy the concatenated code sizes to codegen. Put a marker at the end.
cgnl := codegen[:numLiterals]
for i := range cgnl {
- cgnl[i] = uint8(litEnc.codes[i].len)
+ cgnl[i] = litEnc.codes[i].len()
}
cgnl = codegen[numLiterals : numLiterals+numOffsets]
for i := range cgnl {
- cgnl[i] = uint8(offEnc.codes[i].len)
+ cgnl[i] = offEnc.codes[i].len()
}
codegen[numLiterals+numOffsets] = badCode
@@ -428,8 +428,8 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
func (w *huffmanBitWriter) writeCode(c hcode) {
// The function does not get inlined if we "& 63" the shift.
- w.bits |= uint64(c.code) << (w.nbits & 63)
- w.nbits += c.len
+ w.bits |= c.code64() << (w.nbits & 63)
+ w.nbits += c.len()
if w.nbits >= 48 {
w.writeOutBits()
}
@@ -477,7 +477,7 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n
w.writeBits(int32(numCodegens-4), 4)
for i := 0; i < numCodegens; i++ {
- value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
+ value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
w.writeBits(int32(value), 3)
}
@@ -670,7 +670,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
// Estimate size for using a new table.
// Use the previous header size as the best estimate.
newSize := w.lastHeader + tokens.EstimatedBits()
- newSize += int(w.literalEncoding.codes[endBlockMarker].len) + newSize>>w.logNewTablePenalty
+ newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty
// The estimated size is calculated as an optimal table.
// We add a penalty to make it more realistic and re-use a bit more.
@@ -854,8 +854,8 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
if t < 256 {
//w.writeCode(lits[t.literal()])
c := lits[t]
- bits |= uint64(c.code) << (nbits & 63)
- nbits += c.len
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -882,8 +882,8 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
} else {
// inlined
c := lengths[lengthCode]
- bits |= uint64(c.code) << (nbits & 63)
- nbits += c.len
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -931,8 +931,8 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
} else {
// inlined
c := offs[offsetCode]
- bits |= uint64(c.code) << (nbits & 63)
- nbits += c.len
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -1009,8 +1009,6 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
}
}
- // Fill is rarely better...
- const fill = false
const numLiterals = endBlockMarker + 1
const numOffsets = 1
@@ -1019,7 +1017,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
// Assume header is around 70 bytes:
// https://stackoverflow.com/a/25454430
const guessHeaderSizeBits = 70 * 8
- histogram(input, w.literalFreq[:numLiterals], fill)
+ histogram(input, w.literalFreq[:numLiterals])
ssize, storable := w.storedSize(input)
if storable && len(input) > 1024 {
// Quick check for incompressible content.
@@ -1045,19 +1043,14 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
}
w.literalFreq[endBlockMarker] = 1
w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
- if fill {
- // Clear fill...
- for i := range w.literalFreq[:numLiterals] {
- w.literalFreq[i] = 0
- }
- histogram(input, w.literalFreq[:numLiterals], false)
- }
estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
- estBits += w.lastHeader
- if w.lastHeader == 0 {
- estBits += guessHeaderSizeBits
+ if estBits < math.MaxInt32 {
+ estBits += w.lastHeader
+ if w.lastHeader == 0 {
+ estBits += guessHeaderSizeBits
+ }
+ estBits += estBits >> w.logNewTablePenalty
}
- estBits += estBits >> w.logNewTablePenalty
// Store bytes, if we don't get a reasonable improvement.
if storable && ssize <= estBits {
@@ -1134,12 +1127,12 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
nbytes = 0
}
a, b := encoding[input[0]], encoding[input[1]]
- bits |= uint64(a.code) << (nbits & 63)
- bits |= uint64(b.code) << ((nbits + a.len) & 63)
+ bits |= a.code64() << (nbits & 63)
+ bits |= b.code64() << ((nbits + a.len()) & 63)
c := encoding[input[2]]
- nbits += b.len + a.len
- bits |= uint64(c.code) << (nbits & 63)
- nbits += c.len
+ nbits += b.len() + a.len()
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
input = input[3:]
}
@@ -1165,10 +1158,11 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
}
// Bitwriting inlined, ~30% speedup
c := encoding[t]
- bits |= uint64(c.code) << (nbits & 63)
- nbits += c.len
+ bits |= c.code64() << (nbits & 63)
+
+ nbits += c.len()
if debugDeflate {
- count += int(c.len)
+ count += int(c.len())
}
}
// Restore...
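
Every hunk in this file swaps direct field reads (c.code, c.len) for the accessors introduced in huffman_code.go below, where hcode shrinks from a two-field struct to a packed uint32. A standalone restatement of the packing, so the layout the bit writer now depends on is explicit:

```go
package main

import "fmt"

// hcode packs a huffman code into one uint32, mirroring the vendored change:
// bit length in the low 8 bits, the (bit-reversed) code in the bits above.
type hcode uint32

func newhcode(code uint16, length uint8) hcode {
	return hcode(length) | hcode(code)<<8
}

func (h hcode) len() uint8     { return uint8(h) }
func (h hcode) code64() uint64 { return uint64(h >> 8) }
func (h hcode) zero() bool     { return h == 0 }

func main() {
	c := newhcode(0x155, 9)
	fmt.Println(c.len(), c.code64(), c.zero()) // 9 341 false

	// The zero value now means "unused symbol"; the old code tested len == 0,
	// which is why canReuse switched from b[i].len == 0 to b[i].zero().
	var unused hcode
	fmt.Println(unused.zero()) // true
}
```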
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
index 9ab497c27..5ac144f28 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -16,9 +16,18 @@ const (
)
// hcode is a huffman code with a bit code and bit length.
-type hcode struct {
- code uint16
- len uint8
+type hcode uint32
+
+func (h hcode) len() uint8 {
+ return uint8(h)
+}
+
+func (h hcode) code64() uint64 {
+ return uint64(h >> 8)
+}
+
+func (h hcode) zero() bool {
+ return h == 0
}
type huffmanEncoder struct {
@@ -58,8 +67,11 @@ type levelInfo struct {
// set sets the code and length of an hcode.
func (h *hcode) set(code uint16, length uint8) {
- h.len = length
- h.code = code
+ *h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+ return hcode(length) | (hcode(code) << 8)
}
func reverseBits(number uint16, bitLength byte) uint16 {
@@ -100,7 +112,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
bits = ch + 192 - 280
size = 8
}
- codes[ch] = hcode{code: reverseBits(bits, size), len: size}
+ codes[ch] = newhcode(reverseBits(bits, size), size)
}
return h
}
@@ -109,7 +121,7 @@ func generateFixedOffsetEncoding() *huffmanEncoder {
h := newHuffmanEncoder(30)
codes := h.codes
for ch := range codes {
- codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
+ codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
}
return h
}
@@ -121,7 +133,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int {
var total int
for i, f := range freq {
if f != 0 {
- total += int(f) * int(h.codes[i].len)
+ total += int(f) * int(h.codes[i].len())
}
}
return total
@@ -130,7 +142,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int {
func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
var total int
for _, f := range b {
- total += int(h.codes[f].len)
+ total += int(h.codes[f].len())
}
return total
}
@@ -141,10 +153,10 @@ func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
for i, f := range freq {
if f != 0 {
code := h.codes[i]
- if code.len == 0 {
+ if code.zero() {
return math.MaxInt32
}
- total += int(f) * int(code.len)
+ total += int(f) * int(code.len())
}
}
return total
@@ -308,7 +320,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
sortByLiteral(chunk)
for _, node := range chunk {
- h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint8(n)}
+ h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
code++
}
list = list[0 : len(list)-int(bits)]
@@ -330,7 +342,7 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
list[count] = literalNode{uint16(i), f}
count++
} else {
- codes[i].len = 0
+ codes[i] = 0
}
}
list[count] = literalNode{}
@@ -364,21 +376,37 @@ func atLeastOne(v float32) float32 {
return v
}
-// Unassigned values are assigned '1' in the histogram.
-func fillHist(b []uint16) {
- for i, v := range b {
- if v == 0 {
- b[i] = 1
+func histogram(b []byte, h []uint16) {
+ if true && len(b) >= 8<<10 {
+ // Split for bigger inputs
+ histogramSplit(b, h)
+ } else {
+ h = h[:256]
+ for _, t := range b {
+ h[t]++
}
}
}
-func histogram(b []byte, h []uint16, fill bool) {
+func histogramSplit(b []byte, h []uint16) {
+ // Tested, and slightly faster than 2-way.
+ // Writing to separate arrays and combining is also slightly slower.
h = h[:256]
- for _, t := range b {
- h[t]++
+ for len(b)&3 != 0 {
+ h[b[0]]++
+ b = b[1:]
}
- if fill {
- fillHist(h)
+ n := len(b) / 4
+ x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
+ y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
+ for i, t := range x {
+ v0 := &h[t]
+ v1 := &h[y[i]]
+ v3 := &h[w[i]]
+ v2 := &h[z[i]]
+ *v0++
+ *v1++
+ *v2++
+ *v3++
}
}
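
The replacement histogram drops the fill option entirely and, for inputs of 8 KiB or more, counts with four interleaved streams. The same shape outside the package, with the bounds-check-elision reslicing simplified away:

```go
// histogram4 counts byte frequencies the way the new histogramSplit does:
// peel bytes off the front until the length is a multiple of four, then
// walk the four quarters in lockstep so the increments overlap in flight.
func histogram4(b []byte, h *[256]uint16) {
	for len(b)&3 != 0 {
		h[b[0]]++
		b = b[1:]
	}
	n := len(b) / 4
	x, y, z, w := b[:n], b[n:2*n], b[2*n:3*n], b[3*n:]
	for i := range x {
		h[x[i]]++
		h[y[i]]++
		h[z[i]]++
		h[w[i]]++
	}
}
```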
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
index 544162a43..93a1d1503 100644
--- a/vendor/github.com/klauspost/compress/flate/stateless.go
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -59,9 +59,9 @@ var bitWriterPool = sync.Pool{
},
}
-// StatelessDeflate allows to compress directly to a Writer without retaining state.
+// StatelessDeflate allows compressing directly to a Writer without retaining state.
// When returning everything will be flushed.
-// Up to 8KB of an optional dictionary can be given which is presumed to presumed to precede the block.
+// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
// Longer dictionaries will be truncated and will still produce valid output.
// Sending nil dictionary is perfectly fine.
func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
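
The doc-comment fixes above are cosmetic. For reference, a minimal round trip: StatelessDeflate emits a standard deflate stream, so the stdlib reader can decode it (the interop claim is mine, not stated in this diff):

```go
package main

import (
	"bytes"
	stdflate "compress/flate"
	"fmt"
	"io"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	// One-shot compression; no Writer state survives the call.
	if err := flate.StatelessDeflate(&buf, []byte("hello hello hello"), true, nil); err != nil {
		panic(err)
	}
	r := stdflate.NewReader(&buf)
	defer r.Close()
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello hello hello
}
```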
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
index 671e630a8..9f3e9f79e 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -27,10 +27,7 @@ func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
const fallback8BitSize = 800
type decompress4xContext struct {
- pbr0 *bitReaderShifted
- pbr1 *bitReaderShifted
- pbr2 *bitReaderShifted
- pbr3 *bitReaderShifted
+ pbr *[4]bitReaderShifted
peekBits uint8
out *byte
dstEvery int
@@ -89,10 +86,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
ctx := decompress4xContext{
- pbr0: &br[0],
- pbr1: &br[1],
- pbr2: &br[2],
- pbr3: &br[3],
+ pbr: &br,
peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
out: &out[0],
dstEvery: dstEvery,
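
Folding pbr0..pbr3 into one *[4]bitReaderShifted is what lets the rewritten assembly below keep a single base register (R11) and address each stream at a fixed stride instead of juggling four live pointers. A sketch of the implied layout; the bitReaderShifted field order is inferred from the assembly's offsets (off at 24, value at 32, bitsRead at 40), not copied from the package:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Stand-in with the layout the asm offsets imply: 48 bytes per reader on amd64.
type bitReaderShifted struct {
	in       []byte // offset 0: 24-byte slice header
	off      uint   // offset 24
	value    uint64 // offset 32
	bitsRead uint8  // offset 40 (padded to 48)
}

func main() {
	var br [4]bitReaderShifted
	base := uintptr(unsafe.Pointer(&br[0]))
	for i := range br {
		fmt.Println(uintptr(unsafe.Pointer(&br[i])) - base)
	}
	// Prints 0 48 96 144: the value fields sit at 32, 80, 128, 176 from the
	// base pointer, matching the 32(R11)/80(R11)/128(R11)/176(R11) loads.
}
```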
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
index 6c65c6e2b..dd1a5aecd 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -4,45 +4,40 @@
// +build amd64,!appengine,!noasm,gc
// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
-TEXT ·decompress4x_main_loop_amd64(SB), $8-8
+TEXT ·decompress4x_main_loop_amd64(SB), $0-8
XORQ DX, DX
// Preload values
MOVQ ctx+0(FP), AX
- MOVBQZX 32(AX), SI
- MOVQ 40(AX), DI
- MOVQ DI, BX
- MOVQ 72(AX), CX
- MOVQ CX, (SP)
- MOVQ 48(AX), R8
- MOVQ 56(AX), R9
- MOVQ (AX), R10
- MOVQ 8(AX), R11
- MOVQ 16(AX), R12
- MOVQ 24(AX), R13
+ MOVBQZX 8(AX), DI
+ MOVQ 16(AX), SI
+ MOVQ 48(AX), BX
+ MOVQ 24(AX), R9
+ MOVQ 32(AX), R10
+ MOVQ (AX), R11
// Main loop
main_loop:
- MOVQ BX, DI
- CMPQ DI, (SP)
+ MOVQ SI, R8
+ CMPQ R8, BX
SETGE DL
// br0.fillFast32()
- MOVQ 32(R10), R14
- MOVBQZX 40(R10), R15
- CMPQ R15, $0x20
+ MOVQ 32(R11), R12
+ MOVBQZX 40(R11), R13
+ CMPQ R13, $0x20
JBE skip_fill0
- MOVQ 24(R10), AX
- SUBQ $0x20, R15
+ MOVQ 24(R11), AX
+ SUBQ $0x20, R13
SUBQ $0x04, AX
- MOVQ (R10), BP
+ MOVQ (R11), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (AX)(BP*1), BP
- MOVQ R15, CX
- SHLQ CL, BP
- MOVQ AX, 24(R10)
- ORQ BP, R14
+ MOVL (AX)(R14*1), R14
+ MOVQ R13, CX
+ SHLQ CL, R14
+ MOVQ AX, 24(R11)
+ ORQ R14, R12
// exhausted = exhausted || (br0.off < 4)
CMPQ AX, $0x04
@@ -51,57 +46,57 @@ main_loop:
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
- MOVQ R14, BP
- MOVQ SI, CX
- SHRQ CL, BP
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
// v0 := table[val0&mask]
- MOVW (R9)(BP*2), CX
+ MOVW (R10)(R14*2), CX
// br0.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R14
- ADDB CL, R15
+ SHLQ CL, R12
+ ADDB CL, R13
// val1 := br0.peekTopBits(peekBits)
- MOVQ SI, CX
- MOVQ R14, BP
- SHRQ CL, BP
+ MOVQ DI, CX
+ MOVQ R12, R14
+ SHRQ CL, R14
// v1 := table[val1&mask]
- MOVW (R9)(BP*2), CX
+ MOVW (R10)(R14*2), CX
// br0.advance(uint8(v1.entry))
MOVB CH, AH
- SHLQ CL, R14
- ADDB CL, R15
+ SHLQ CL, R12
+ ADDB CL, R13
// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
- MOVW AX, (DI)
+ MOVW AX, (R8)
- // update the bitrader reader structure
- MOVQ R14, 32(R10)
- MOVB R15, 40(R10)
- ADDQ R8, DI
+ // update the bitreader structure
+ MOVQ R12, 32(R11)
+ MOVB R13, 40(R11)
+ ADDQ R9, R8
// br1.fillFast32()
- MOVQ 32(R11), R14
- MOVBQZX 40(R11), R15
- CMPQ R15, $0x20
+ MOVQ 80(R11), R12
+ MOVBQZX 88(R11), R13
+ CMPQ R13, $0x20
JBE skip_fill1
- MOVQ 24(R11), AX
- SUBQ $0x20, R15
+ MOVQ 72(R11), AX
+ SUBQ $0x20, R13
SUBQ $0x04, AX
- MOVQ (R11), BP
+ MOVQ 48(R11), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (AX)(BP*1), BP
- MOVQ R15, CX
- SHLQ CL, BP
- MOVQ AX, 24(R11)
- ORQ BP, R14
+ MOVL (AX)(R14*1), R14
+ MOVQ R13, CX
+ SHLQ CL, R14
+ MOVQ AX, 72(R11)
+ ORQ R14, R12
// exhausted = exhausted || (br1.off < 4)
CMPQ AX, $0x04
@@ -110,57 +105,57 @@ skip_fill0:
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
- MOVQ R14, BP
- MOVQ SI, CX
- SHRQ CL, BP
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
// v0 := table[val0&mask]
- MOVW (R9)(BP*2), CX
+ MOVW (R10)(R14*2), CX
// br1.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R14
- ADDB CL, R15
+ SHLQ CL, R12
+ ADDB CL, R13
// val1 := br1.peekTopBits(peekBits)
- MOVQ SI, CX
- MOVQ R14, BP
- SHRQ CL, BP
+ MOVQ DI, CX
+ MOVQ R12, R14
+ SHRQ CL, R14
// v1 := table[val1&mask]
- MOVW (R9)(BP*2), CX
+ MOVW (R10)(R14*2), CX
// br1.advance(uint8(v1.entry))
MOVB CH, AH
- SHLQ CL, R14
- ADDB CL, R15
+ SHLQ CL, R12
+ ADDB CL, R13
// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
- MOVW AX, (DI)
+ MOVW AX, (R8)
- // update the bitrader reader structure
- MOVQ R14, 32(R11)
- MOVB R15, 40(R11)
- ADDQ R8, DI
+ // update the bitreader structure
+ MOVQ R12, 80(R11)
+ MOVB R13, 88(R11)
+ ADDQ R9, R8
// br2.fillFast32()
- MOVQ 32(R12), R14
- MOVBQZX 40(R12), R15
- CMPQ R15, $0x20
+ MOVQ 128(R11), R12
+ MOVBQZX 136(R11), R13
+ CMPQ R13, $0x20
JBE skip_fill2
- MOVQ 24(R12), AX
- SUBQ $0x20, R15
+ MOVQ 120(R11), AX
+ SUBQ $0x20, R13
SUBQ $0x04, AX
- MOVQ (R12), BP
+ MOVQ 96(R11), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (AX)(BP*1), BP
- MOVQ R15, CX
- SHLQ CL, BP
- MOVQ AX, 24(R12)
- ORQ BP, R14
+ MOVL (AX)(R14*1), R14
+ MOVQ R13, CX
+ SHLQ CL, R14
+ MOVQ AX, 120(R11)
+ ORQ R14, R12
// exhausted = exhausted || (br2.off < 4)
CMPQ AX, $0x04
@@ -169,57 +164,57 @@ skip_fill1:
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
- MOVQ R14, BP
- MOVQ SI, CX
- SHRQ CL, BP
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
// v0 := table[val0&mask]
- MOVW (R9)(BP*2), CX
+ MOVW (R10)(R14*2), CX
// br2.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R14
- ADDB CL, R15
+ SHLQ CL, R12
+ ADDB CL, R13
// val1 := br2.peekTopBits(peekBits)
- MOVQ SI, CX
- MOVQ R14, BP
- SHRQ CL, BP
+ MOVQ DI, CX
+ MOVQ R12, R14
+ SHRQ CL, R14
// v1 := table[val1&mask]
- MOVW (R9)(BP*2), CX
+ MOVW (R10)(R14*2), CX
// br2.advance(uint8(v1.entry))
MOVB CH, AH
- SHLQ CL, R14
- ADDB CL, R15
+ SHLQ CL, R12
+ ADDB CL, R13
// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
- MOVW AX, (DI)
+ MOVW AX, (R8)
- // update the bitrader reader structure
- MOVQ R14, 32(R12)
- MOVB R15, 40(R12)
- ADDQ R8, DI
+ // update the bitreader structure
+ MOVQ R12, 128(R11)
+ MOVB R13, 136(R11)
+ ADDQ R9, R8
// br3.fillFast32()
- MOVQ 32(R13), R14
- MOVBQZX 40(R13), R15
- CMPQ R15, $0x20
+ MOVQ 176(R11), R12
+ MOVBQZX 184(R11), R13
+ CMPQ R13, $0x20
JBE skip_fill3
- MOVQ 24(R13), AX
- SUBQ $0x20, R15
+ MOVQ 168(R11), AX
+ SUBQ $0x20, R13
SUBQ $0x04, AX
- MOVQ (R13), BP
+ MOVQ 144(R11), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (AX)(BP*1), BP
- MOVQ R15, CX
- SHLQ CL, BP
- MOVQ AX, 24(R13)
- ORQ BP, R14
+ MOVL (AX)(R14*1), R14
+ MOVQ R13, CX
+ SHLQ CL, R14
+ MOVQ AX, 168(R11)
+ ORQ R14, R12
// exhausted = exhausted || (br3.off < 4)
CMPQ AX, $0x04
@@ -228,149 +223,142 @@ skip_fill2:
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
- MOVQ R14, BP
- MOVQ SI, CX
- SHRQ CL, BP
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
// v0 := table[val0&mask]
- MOVW (R9)(BP*2), CX
+ MOVW (R10)(R14*2), CX
// br3.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R14
- ADDB CL, R15
+ SHLQ CL, R12
+ ADDB CL, R13
// val1 := br3.peekTopBits(peekBits)
- MOVQ SI, CX
- MOVQ R14, BP
- SHRQ CL, BP
+ MOVQ DI, CX
+ MOVQ R12, R14
+ SHRQ CL, R14
// v1 := table[val1&mask]
- MOVW (R9)(BP*2), CX
+ MOVW (R10)(R14*2), CX
// br3.advance(uint8(v1.entry))
MOVB CH, AH
- SHLQ CL, R14
- ADDB CL, R15
+ SHLQ CL, R12
+ ADDB CL, R13
// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
- MOVW AX, (DI)
+ MOVW AX, (R8)
- // update the bitrader reader structure
- MOVQ R14, 32(R13)
- MOVB R15, 40(R13)
- ADDQ $0x02, BX
+ // update the bitreader structure
+ MOVQ R12, 176(R11)
+ MOVB R13, 184(R11)
+ ADDQ $0x02, SI
TESTB DL, DL
JZ main_loop
MOVQ ctx+0(FP), AX
- MOVQ 40(AX), CX
- MOVQ BX, DX
- SUBQ CX, DX
- SHLQ $0x02, DX
- MOVQ DX, 64(AX)
+ SUBQ 16(AX), SI
+ SHLQ $0x02, SI
+ MOVQ SI, 40(AX)
RET
// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
-TEXT ·decompress4x_8b_main_loop_amd64(SB), $16-8
+TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
XORQ DX, DX
// Preload values
MOVQ ctx+0(FP), CX
- MOVBQZX 32(CX), BX
- MOVQ 40(CX), SI
- MOVQ SI, (SP)
- MOVQ 72(CX), DX
- MOVQ DX, 8(SP)
- MOVQ 48(CX), DI
- MOVQ 56(CX), R8
- MOVQ (CX), R9
- MOVQ 8(CX), R10
- MOVQ 16(CX), R11
- MOVQ 24(CX), R12
+ MOVBQZX 8(CX), DI
+ MOVQ 16(CX), BX
+ MOVQ 48(CX), SI
+ MOVQ 24(CX), R9
+ MOVQ 32(CX), R10
+ MOVQ (CX), R11
// Main loop
main_loop:
- MOVQ (SP), SI
- CMPQ SI, 8(SP)
+ MOVQ BX, R8
+ CMPQ R8, SI
SETGE DL
- // br1000.fillFast32()
- MOVQ 32(R9), R13
- MOVBQZX 40(R9), R14
- CMPQ R14, $0x20
- JBE skip_fill1000
- MOVQ 24(R9), R15
- SUBQ $0x20, R14
- SUBQ $0x04, R15
- MOVQ (R9), BP
+ // br0.fillFast32()
+ MOVQ 32(R11), R12
+ MOVBQZX 40(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill0
+ MOVQ 24(R11), R14
+ SUBQ $0x20, R13
+ SUBQ $0x04, R14
+ MOVQ (R11), R15
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (R15)(BP*1), BP
- MOVQ R14, CX
- SHLQ CL, BP
- MOVQ R15, 24(R9)
- ORQ BP, R13
-
- // exhausted = exhausted || (br1000.off < 4)
- CMPQ R15, $0x04
+ MOVL (R14)(R15*1), R15
+ MOVQ R13, CX
+ SHLQ CL, R15
+ MOVQ R14, 24(R11)
+ ORQ R15, R12
+
+ // exhausted = exhausted || (br0.off < 4)
+ CMPQ R14, $0x04
SETLT AL
ORB AL, DL
-skip_fill1000:
+skip_fill0:
// val0 := br0.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
// v0 := table[val0&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br0.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
// val1 := br0.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
	// v1 := table[val1&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br0.advance(uint8(v1.entry)
MOVB CH, AH
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
BSWAPL AX
// val2 := br0.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
	// v2 := table[val2&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br0.advance(uint8(v2.entry)
MOVB CH, AH
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
// val3 := br0.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
	// v3 := table[val3&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br0.advance(uint8(v3.entry)
MOVB CH, AL
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
BSWAPL AX
// these four writes get coalesced
@@ -378,88 +366,88 @@ skip_fill1000:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
- MOVL AX, (SI)
-
- // update the bitreader reader structure
- MOVQ R13, 32(R9)
- MOVB R14, 40(R9)
- ADDQ DI, SI
-
- // br1001.fillFast32()
- MOVQ 32(R10), R13
- MOVBQZX 40(R10), R14
- CMPQ R14, $0x20
- JBE skip_fill1001
- MOVQ 24(R10), R15
- SUBQ $0x20, R14
- SUBQ $0x04, R15
- MOVQ (R10), BP
+ MOVL AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 32(R11)
+ MOVB R13, 40(R11)
+ ADDQ R9, R8
+
+ // br1.fillFast32()
+ MOVQ 80(R11), R12
+ MOVBQZX 88(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill1
+ MOVQ 72(R11), R14
+ SUBQ $0x20, R13
+ SUBQ $0x04, R14
+ MOVQ 48(R11), R15
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (R15)(BP*1), BP
- MOVQ R14, CX
- SHLQ CL, BP
- MOVQ R15, 24(R10)
- ORQ BP, R13
-
- // exhausted = exhausted || (br1001.off < 4)
- CMPQ R15, $0x04
+ MOVL (R14)(R15*1), R15
+ MOVQ R13, CX
+ SHLQ CL, R15
+ MOVQ R14, 72(R11)
+ ORQ R15, R12
+
+ // exhausted = exhausted || (br1.off < 4)
+ CMPQ R14, $0x04
SETLT AL
ORB AL, DL
-skip_fill1001:
+skip_fill1:
// val0 := br1.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
// v0 := table[val0&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br1.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
// val1 := br1.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
	// v1 := table[val1&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br1.advance(uint8(v1.entry)
MOVB CH, AH
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
BSWAPL AX
// val2 := br1.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
	// v2 := table[val2&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br1.advance(uint8(v2.entry)
MOVB CH, AH
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
// val3 := br1.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
	// v3 := table[val3&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br1.advance(uint8(v3.entry)
MOVB CH, AL
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
BSWAPL AX
// these four writes get coalesced
@@ -467,88 +455,88 @@ skip_fill1001:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
- MOVL AX, (SI)
-
- // update the bitreader reader structure
- MOVQ R13, 32(R10)
- MOVB R14, 40(R10)
- ADDQ DI, SI
-
- // br1002.fillFast32()
- MOVQ 32(R11), R13
- MOVBQZX 40(R11), R14
- CMPQ R14, $0x20
- JBE skip_fill1002
- MOVQ 24(R11), R15
- SUBQ $0x20, R14
- SUBQ $0x04, R15
- MOVQ (R11), BP
+ MOVL AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 80(R11)
+ MOVB R13, 88(R11)
+ ADDQ R9, R8
+
+ // br2.fillFast32()
+ MOVQ 128(R11), R12
+ MOVBQZX 136(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill2
+ MOVQ 120(R11), R14
+ SUBQ $0x20, R13
+ SUBQ $0x04, R14
+ MOVQ 96(R11), R15
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (R15)(BP*1), BP
- MOVQ R14, CX
- SHLQ CL, BP
- MOVQ R15, 24(R11)
- ORQ BP, R13
-
- // exhausted = exhausted || (br1002.off < 4)
- CMPQ R15, $0x04
+ MOVL (R14)(R15*1), R15
+ MOVQ R13, CX
+ SHLQ CL, R15
+ MOVQ R14, 120(R11)
+ ORQ R15, R12
+
+ // exhausted = exhausted || (br2.off < 4)
+ CMPQ R14, $0x04
SETLT AL
ORB AL, DL
-skip_fill1002:
+skip_fill2:
// val0 := br2.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
// v0 := table[val0&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br2.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
// val1 := br2.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
	// v1 := table[val1&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br2.advance(uint8(v1.entry)
MOVB CH, AH
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
BSWAPL AX
// val2 := br2.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
	// v2 := table[val2&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br2.advance(uint8(v2.entry)
MOVB CH, AH
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
// val3 := br2.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
	// v3 := table[val3&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br2.advance(uint8(v3.entry)
MOVB CH, AL
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
BSWAPL AX
// these four writes get coalesced
@@ -556,88 +544,88 @@ skip_fill1002:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
- MOVL AX, (SI)
-
- // update the bitreader reader structure
- MOVQ R13, 32(R11)
- MOVB R14, 40(R11)
- ADDQ DI, SI
-
- // br1003.fillFast32()
- MOVQ 32(R12), R13
- MOVBQZX 40(R12), R14
- CMPQ R14, $0x20
- JBE skip_fill1003
- MOVQ 24(R12), R15
- SUBQ $0x20, R14
- SUBQ $0x04, R15
- MOVQ (R12), BP
+ MOVL AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 128(R11)
+ MOVB R13, 136(R11)
+ ADDQ R9, R8
+
+ // br3.fillFast32()
+ MOVQ 176(R11), R12
+ MOVBQZX 184(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill3
+ MOVQ 168(R11), R14
+ SUBQ $0x20, R13
+ SUBQ $0x04, R14
+ MOVQ 144(R11), R15
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (R15)(BP*1), BP
- MOVQ R14, CX
- SHLQ CL, BP
- MOVQ R15, 24(R12)
- ORQ BP, R13
-
- // exhausted = exhausted || (br1003.off < 4)
- CMPQ R15, $0x04
+ MOVL (R14)(R15*1), R15
+ MOVQ R13, CX
+ SHLQ CL, R15
+ MOVQ R14, 168(R11)
+ ORQ R15, R12
+
+ // exhausted = exhausted || (br3.off < 4)
+ CMPQ R14, $0x04
SETLT AL
ORB AL, DL
-skip_fill1003:
+skip_fill3:
// val0 := br3.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
// v0 := table[val0&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br3.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
// val1 := br3.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
	// v1 := table[val1&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br3.advance(uint8(v1.entry)
MOVB CH, AH
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
BSWAPL AX
// val2 := br3.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
	// v2 := table[val2&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br3.advance(uint8(v2.entry)
MOVB CH, AH
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
// val3 := br3.peekTopBits(peekBits)
- MOVQ R13, R15
- MOVQ BX, CX
- SHRQ CL, R15
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
	// v3 := table[val3&mask]
- MOVW (R8)(R15*2), CX
+ MOVW (R10)(R14*2), CX
// br3.advance(uint8(v3.entry)
MOVB CH, AL
- SHLQ CL, R13
- ADDB CL, R14
+ SHLQ CL, R12
+ ADDB CL, R13
BSWAPL AX
// these four writes get coalesced
@@ -645,20 +633,18 @@ skip_fill1003:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
- MOVL AX, (SI)
+ MOVL AX, (R8)
- // update the bitreader reader structure
- MOVQ R13, 32(R12)
- MOVB R14, 40(R12)
- ADDQ $0x04, (SP)
+ // update the bitreader structure
+ MOVQ R12, 176(R11)
+ MOVB R13, 184(R11)
+ ADDQ $0x04, BX
TESTB DL, DL
JZ main_loop
MOVQ ctx+0(FP), AX
- MOVQ 40(AX), CX
- MOVQ (SP), DX
- SUBQ CX, DX
- SHLQ $0x02, DX
- MOVQ DX, 64(AX)
+ SUBQ 16(AX), BX
+ SHLQ $0x02, BX
+ MOVQ BX, 40(AX)
RET
// func decompress1x_main_loop_amd64(ctx *decompress1xContext)
@@ -750,10 +736,8 @@ loop_condition:
// Update ctx structure
MOVQ ctx+0(FP), AX
- MOVQ DX, CX
- MOVQ 16(AX), DX
- SUBQ DX, CX
- MOVQ CX, 40(AX)
+ SUBQ 16(AX), DX
+ MOVQ DX, 40(AX)
MOVQ (AX), AX
MOVQ R9, 24(AX)
MOVQ R10, 32(AX)
@@ -847,10 +831,8 @@ loop_condition:
// Update ctx structure
MOVQ ctx+0(FP), AX
- MOVQ DX, CX
- MOVQ 16(AX), DX
- SUBQ DX, CX
- MOVQ CX, 40(AX)
+ SUBQ 16(AX), DX
+ MOVQ DX, 40(AX)
MOVQ (AX), AX
MOVQ R9, 24(AX)
MOVQ R10, 32(AX)
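
Reading the asm above, every skip_fillN block implements the same refill; reconstructed in Go from the inline comments (this is my rendering, not the package's source):

```go
package main

import "encoding/binary"

type bitReaderShifted struct {
	in       []byte
	off      uint
	value    uint64
	bitsRead uint8
}

// fillFast32 as the assembly does it: once more than 32 bits have been
// consumed, pull in 4 bytes and splice them below the remaining bits.
func (b *bitReaderShifted) fillFast32() {
	if b.bitsRead <= 32 {
		return // the JBE skip_fillN path
	}
	b.bitsRead -= 32
	b.off -= 4
	low := binary.LittleEndian.Uint32(b.in[b.off:])
	b.value |= uint64(low) << (b.bitsRead & 63)
}

func main() {}
```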
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
index 23333b969..2f8860a72 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -180,7 +180,6 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
}
b.advance((bitCount + 7) >> 3)
- // println(s.norm[:s.symbolLen], s.symbolLen)
return s.buildDtable()
}
@@ -269,68 +268,6 @@ func (s *fseDecoder) setRLE(symbol decSymbol) {
s.dt[0] = symbol
}
-// buildDtable will build the decoding table.
-func (s *fseDecoder) buildDtable() error {
- tableSize := uint32(1 << s.actualTableLog)
- highThreshold := tableSize - 1
- symbolNext := s.stateTable[:256]
-
- // Init, lay down lowprob symbols
- {
- for i, v := range s.norm[:s.symbolLen] {
- if v == -1 {
- s.dt[highThreshold].setAddBits(uint8(i))
- highThreshold--
- symbolNext[i] = 1
- } else {
- symbolNext[i] = uint16(v)
- }
- }
- }
- // Spread symbols
- {
- tableMask := tableSize - 1
- step := tableStep(tableSize)
- position := uint32(0)
- for ss, v := range s.norm[:s.symbolLen] {
- for i := 0; i < int(v); i++ {
- s.dt[position].setAddBits(uint8(ss))
- position = (position + step) & tableMask
- for position > highThreshold {
- // lowprob area
- position = (position + step) & tableMask
- }
- }
- }
- if position != 0 {
- // position must reach all cells once, otherwise normalizedCounter is incorrect
- return errors.New("corrupted input (position != 0)")
- }
- }
-
- // Build Decoding table
- {
- tableSize := uint16(1 << s.actualTableLog)
- for u, v := range s.dt[:tableSize] {
- symbol := v.addBits()
- nextState := symbolNext[symbol]
- symbolNext[symbol] = nextState + 1
- nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
- s.dt[u&maxTableMask].setNBits(nBits)
- newState := (nextState << nBits) - tableSize
- if newState > tableSize {
- return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
- }
- if newState == uint16(u) && nBits == 0 {
- // Seems weird that this is possible with nbits > 0.
- return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
- }
- s.dt[u&maxTableMask].setNewState(newState)
- }
- }
- return nil
-}
-
// transform will transform the decoder table into a table usable for
// decoding without having to apply the transformation while decoding.
// The state will contain the base value and the number of bits to read.
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
new file mode 100644
index 000000000..e74df436c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
@@ -0,0 +1,64 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+ "fmt"
+)
+
+type buildDtableAsmContext struct {
+ // inputs
+ stateTable *uint16
+ norm *int16
+ dt *uint64
+
+ // outputs --- set by the procedure in the case of error;
+ // for interpretation please see the error handling part below
+ errParam1 uint64
+ errParam2 uint64
+}
+
+// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
+// Function returns non-zero exit code on error.
+//go:noescape
+func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
+
+// please keep in sync with _generate/gen_fse.go
+const (
+ errorCorruptedNormalizedCounter = 1
+ errorNewStateTooBig = 2
+ errorNewStateNoBits = 3
+)
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+ ctx := buildDtableAsmContext{
+ stateTable: (*uint16)(&s.stateTable[0]),
+ norm: (*int16)(&s.norm[0]),
+ dt: (*uint64)(&s.dt[0]),
+ }
+ code := buildDtable_asm(s, &ctx)
+
+ if code != 0 {
+ switch code {
+ case errorCorruptedNormalizedCounter:
+ position := ctx.errParam1
+ return fmt.Errorf("corrupted input (position=%d, expected 0)", position)
+
+ case errorNewStateTooBig:
+ newState := decSymbol(ctx.errParam1)
+ size := ctx.errParam2
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, size)
+
+ case errorNewStateNoBits:
+ newState := decSymbol(ctx.errParam1)
+ oldState := decSymbol(ctx.errParam2)
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState)
+
+ default:
+ return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
new file mode 100644
index 000000000..da32b4420
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
@@ -0,0 +1,127 @@
+// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+// +build !appengine,!noasm,gc,!noasm
+
+// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
+TEXT ·buildDtable_asm(SB), $0-24
+ MOVQ ctx+8(FP), CX
+ MOVQ s+0(FP), DI
+
+ // Load values
+ MOVBQZX 4098(DI), DX
+ XORQ AX, AX
+ BTSQ DX, AX
+ MOVQ (CX), BX
+ MOVQ 16(CX), SI
+ LEAQ -1(AX), R8
+ MOVQ 8(CX), CX
+ MOVWQZX 4096(DI), DI
+
+ // End load values
+ // Init, lay down lowprob symbols
+ XORQ R9, R9
+ JMP init_main_loop_condition
+
+init_main_loop:
+ MOVWQSX (CX)(R9*2), R10
+ CMPW R10, $-1
+ JNE do_not_update_high_threshold
+ MOVB R9, 1(SI)(R8*8)
+ DECQ R8
+ MOVQ $0x0000000000000001, R10
+
+do_not_update_high_threshold:
+ MOVW R10, (BX)(R9*2)
+ INCQ R9
+
+init_main_loop_condition:
+ CMPQ R9, DI
+ JL init_main_loop
+
+ // Spread symbols
+ // Calculate table step
+ MOVQ AX, R9
+ SHRQ $0x01, R9
+ MOVQ AX, R10
+ SHRQ $0x03, R10
+ LEAQ 3(R9)(R10*1), R9
+
+ // Fill add bits values
+ LEAQ -1(AX), R10
+ XORQ R11, R11
+ XORQ R12, R12
+ JMP spread_main_loop_condition
+
+spread_main_loop:
+ XORQ R13, R13
+ MOVWQSX (CX)(R12*2), R14
+ JMP spread_inner_loop_condition
+
+spread_inner_loop:
+ MOVB R12, 1(SI)(R11*8)
+
+adjust_position:
+ ADDQ R9, R11
+ ANDQ R10, R11
+ CMPQ R11, R8
+ JG adjust_position
+ INCQ R13
+
+spread_inner_loop_condition:
+ CMPQ R13, R14
+ JL spread_inner_loop
+ INCQ R12
+
+spread_main_loop_condition:
+ CMPQ R12, DI
+ JL spread_main_loop
+ TESTQ R11, R11
+ JZ spread_check_ok
+ MOVQ ctx+8(FP), AX
+ MOVQ R11, 24(AX)
+ MOVQ $+1, ret+16(FP)
+ RET
+
+spread_check_ok:
+ // Build Decoding table
+ XORQ DI, DI
+
+build_table_main_table:
+ MOVBQZX 1(SI)(DI*8), CX
+ MOVWQZX (BX)(CX*2), R8
+ LEAQ 1(R8), R9
+ MOVW R9, (BX)(CX*2)
+ MOVQ R8, R9
+ BSRQ R9, R9
+ MOVQ DX, CX
+ SUBQ R9, CX
+ SHLQ CL, R8
+ SUBQ AX, R8
+ MOVB CL, (SI)(DI*8)
+ MOVW R8, 2(SI)(DI*8)
+ CMPQ R8, AX
+ JLE build_table_check1_ok
+ MOVQ ctx+8(FP), CX
+ MOVQ R8, 24(CX)
+ MOVQ AX, 32(CX)
+ MOVQ $+2, ret+16(FP)
+ RET
+
+build_table_check1_ok:
+ TESTB CL, CL
+ JNZ build_table_check2_ok
+ CMPW R8, DI
+ JNE build_table_check2_ok
+ MOVQ ctx+8(FP), AX
+ MOVQ R8, 24(AX)
+ MOVQ DI, 32(AX)
+ MOVQ $+3, ret+16(FP)
+ RET
+
+build_table_check2_ok:
+ INCQ DI
+ CMPQ DI, AX
+ JL build_table_main_table
+ MOVQ $+0, ret+16(FP)
+ RET
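
The "Calculate table step" block (SHRQ $1, SHRQ $3, then LEAQ 3(R9)(R10*1)) is the standard FSE spread step, which the generic fallback below obtains from tableStep; in Go:

```go
// tableStep as the assembly computes it from tableSize = 1 << actualTableLog.
func tableStep(tableSize uint32) uint32 {
	return tableSize>>1 + tableSize>>3 + 3
}
```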
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
new file mode 100644
index 000000000..332e51fe4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
@@ -0,0 +1,72 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+)
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+ tableSize := uint32(1 << s.actualTableLog)
+ highThreshold := tableSize - 1
+ symbolNext := s.stateTable[:256]
+
+ // Init, lay down lowprob symbols
+ {
+ for i, v := range s.norm[:s.symbolLen] {
+ if v == -1 {
+ s.dt[highThreshold].setAddBits(uint8(i))
+ highThreshold--
+ symbolNext[i] = 1
+ } else {
+ symbolNext[i] = uint16(v)
+ }
+ }
+ }
+
+ // Spread symbols
+ {
+ tableMask := tableSize - 1
+ step := tableStep(tableSize)
+ position := uint32(0)
+ for ss, v := range s.norm[:s.symbolLen] {
+ for i := 0; i < int(v); i++ {
+ s.dt[position].setAddBits(uint8(ss))
+ position = (position + step) & tableMask
+ for position > highThreshold {
+ // lowprob area
+ position = (position + step) & tableMask
+ }
+ }
+ }
+ if position != 0 {
+ // position must reach all cells once, otherwise normalizedCounter is incorrect
+ return errors.New("corrupted input (position != 0)")
+ }
+ }
+
+ // Build Decoding table
+ {
+ tableSize := uint16(1 << s.actualTableLog)
+ for u, v := range s.dt[:tableSize] {
+ symbol := v.addBits()
+ nextState := symbolNext[symbol]
+ symbolNext[symbol] = nextState + 1
+ nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+ s.dt[u&maxTableMask].setNBits(nBits)
+ newState := (nextState << nBits) - tableSize
+ if newState > tableSize {
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+ }
+ if newState == uint16(u) && nBits == 0 {
+ // Seems weird that this is possible with nbits > 0.
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+ }
+ s.dt[u&maxTableMask].setNewState(newState)
+ }
+ }
+ return nil
+}
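
Why the spread loop may insist on position == 0 afterwards: for any tableLog >= 4 the step is odd (both shift terms are even) while the table size is a power of two, so the step is coprime with the size and the walk is a full cycle that returns to 0 after exactly tableSize increments; the skips over the lowprob area consume steps but do not change that. A quick standalone check over a representative range of table logs (the cap of 9 is an assumption about zstd's decoders):

```go
package main

import "fmt"

func main() {
	for log := uint(5); log <= 9; log++ {
		size := uint32(1) << log
		step := size>>1 + size>>3 + 3
		seen := make([]bool, size)
		pos := uint32(0)
		for i := uint32(0); i < size; i++ {
			seen[pos] = true
			pos = (pos + step) & (size - 1)
		}
		for cell, ok := range seen {
			if !ok {
				fmt.Println("missed cell", cell, "at log", log)
			}
		}
		fmt.Println("log", log, "ends at", pos) // always 0
	}
}
```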
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 212c6cac3..71e64e061 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -134,18 +134,17 @@ sequenceDecs_decode_amd64_fill_2_end:
MOVBQZX DI, R14
SHRQ $0x10, DI
MOVWQZX DI, DI
- CMPQ R14, $0x00
- JZ sequenceDecs_decode_amd64_llState_updateState_skip_zero
- MOVQ BX, CX
- ADDQ R14, BX
+ LEAQ (BX)(R14*1), CX
MOVQ DX, R15
- SHLQ CL, R15
- MOVQ R14, CX
- NEGQ CX
- SHRQ CL, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
ADDQ R15, DI
-sequenceDecs_decode_amd64_llState_updateState_skip_zero:
// Load ctx.llTable
MOVQ ctx+16(FP), CX
MOVQ (CX), CX
@@ -155,18 +154,17 @@ sequenceDecs_decode_amd64_llState_updateState_skip_zero:
MOVBQZX R8, R14
SHRQ $0x10, R8
MOVWQZX R8, R8
- CMPQ R14, $0x00
- JZ sequenceDecs_decode_amd64_mlState_updateState_skip_zero
- MOVQ BX, CX
- ADDQ R14, BX
+ LEAQ (BX)(R14*1), CX
MOVQ DX, R15
- SHLQ CL, R15
- MOVQ R14, CX
- NEGQ CX
- SHRQ CL, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
ADDQ R15, R8
-sequenceDecs_decode_amd64_mlState_updateState_skip_zero:
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX
@@ -176,18 +174,17 @@ sequenceDecs_decode_amd64_mlState_updateState_skip_zero:
MOVBQZX R9, R14
SHRQ $0x10, R9
MOVWQZX R9, R9
- CMPQ R14, $0x00
- JZ sequenceDecs_decode_amd64_ofState_updateState_skip_zero
- MOVQ BX, CX
- ADDQ R14, BX
+ LEAQ (BX)(R14*1), CX
MOVQ DX, R15
- SHLQ CL, R15
- MOVQ R14, CX
- NEGQ CX
- SHRQ CL, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
ADDQ R15, R9
-sequenceDecs_decode_amd64_ofState_updateState_skip_zero:
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
MOVQ 48(CX), CX
@@ -416,18 +413,17 @@ sequenceDecs_decode_56_amd64_fill_end:
MOVBQZX DI, R14
SHRQ $0x10, DI
MOVWQZX DI, DI
- CMPQ R14, $0x00
- JZ sequenceDecs_decode_56_amd64_llState_updateState_skip_zero
- MOVQ BX, CX
- ADDQ R14, BX
+ LEAQ (BX)(R14*1), CX
MOVQ DX, R15
- SHLQ CL, R15
- MOVQ R14, CX
- NEGQ CX
- SHRQ CL, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
ADDQ R15, DI
-sequenceDecs_decode_56_amd64_llState_updateState_skip_zero:
// Load ctx.llTable
MOVQ ctx+16(FP), CX
MOVQ (CX), CX
@@ -437,18 +433,17 @@ sequenceDecs_decode_56_amd64_llState_updateState_skip_zero:
MOVBQZX R8, R14
SHRQ $0x10, R8
MOVWQZX R8, R8
- CMPQ R14, $0x00
- JZ sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero
- MOVQ BX, CX
- ADDQ R14, BX
+ LEAQ (BX)(R14*1), CX
MOVQ DX, R15
- SHLQ CL, R15
- MOVQ R14, CX
- NEGQ CX
- SHRQ CL, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
ADDQ R15, R8
-sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero:
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX
@@ -458,18 +453,17 @@ sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero:
MOVBQZX R9, R14
SHRQ $0x10, R9
MOVWQZX R9, R9
- CMPQ R14, $0x00
- JZ sequenceDecs_decode_56_amd64_ofState_updateState_skip_zero
- MOVQ BX, CX
- ADDQ R14, BX
+ LEAQ (BX)(R14*1), CX
MOVQ DX, R15
- SHLQ CL, R15
- MOVQ R14, CX
- NEGQ CX
- SHRQ CL, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
ADDQ R15, R9
-sequenceDecs_decode_56_amd64_ofState_updateState_skip_zero:
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
MOVQ 48(CX), CX
@@ -1181,52 +1175,65 @@ check_offset:
JG error_match_off_too_big
// Copy match from history
- MOVQ R12, R11
- SUBQ DI, R11
- JLS copy_match
- MOVQ R9, R14
- SUBQ R11, R14
- CMPQ R13, R11
- JGE copy_all_from_history
- XORQ R11, R11
- TESTQ $0x00000001, R13
- JZ copy_4_word
- MOVB (R14)(R11*1), R12
- MOVB R12, (BX)(R11*1)
- ADDQ $0x01, R11
-
-copy_4_word:
- TESTQ $0x00000002, R13
- JZ copy_4_dword
- MOVW (R14)(R11*1), R12
- MOVW R12, (BX)(R11*1)
- ADDQ $0x02, R11
-
-copy_4_dword:
- TESTQ $0x00000004, R13
- JZ copy_4_qword
- MOVL (R14)(R11*1), R12
- MOVL R12, (BX)(R11*1)
- ADDQ $0x04, R11
-
-copy_4_qword:
- TESTQ $0x00000008, R13
- JZ copy_4_test
- MOVQ (R14)(R11*1), R12
- MOVQ R12, (BX)(R11*1)
- ADDQ $0x08, R11
- JMP copy_4_test
-
-copy_4:
- MOVUPS (R14)(R11*1), X0
- MOVUPS X0, (BX)(R11*1)
- ADDQ $0x10, R11
+ MOVQ R12, R11
+ SUBQ DI, R11
+ JLS copy_match
+ MOVQ R9, R14
+ SUBQ R11, R14
+ CMPQ R13, R11
+ JG copy_all_from_history
+ MOVQ R13, R11
+ SUBQ $0x10, R11
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R11
+ JAE copy_4_loop
+ LEAQ 16(R14)(R11*1), R14
+ LEAQ 16(BX)(R11*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), R11
+ MOVB 2(R14), R12
+ MOVW R11, (BX)
+ MOVB R12, 2(BX)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), R11
+ MOVL -4(R14)(R13*1), R12
+ MOVL R11, (BX)
+ MOVL R12, -4(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), R11
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ R11, (BX)
+ MOVQ R12, -8(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
-copy_4_test:
- CMPQ R11, R13
- JB copy_4
+copy_4_end:
ADDQ R13, DI
- ADDQ R13, BX
ADDQ $0x18, AX
INCQ DX
CMPQ DX, CX
@@ -1234,53 +1241,74 @@ copy_4_test:
JMP loop_finished
copy_all_from_history:
- XORQ R15, R15
- TESTQ $0x00000001, R11
- JZ copy_5_word
- MOVB (R14)(R15*1), BP
- MOVB BP, (BX)(R15*1)
- ADDQ $0x01, R15
-
-copy_5_word:
- TESTQ $0x00000002, R11
- JZ copy_5_dword
- MOVW (R14)(R15*1), BP
- MOVW BP, (BX)(R15*1)
- ADDQ $0x02, R15
-
-copy_5_dword:
- TESTQ $0x00000004, R11
- JZ copy_5_qword
- MOVL (R14)(R15*1), BP
- MOVL BP, (BX)(R15*1)
- ADDQ $0x04, R15
-
-copy_5_qword:
- TESTQ $0x00000008, R11
- JZ copy_5_test
- MOVQ (R14)(R15*1), BP
- MOVQ BP, (BX)(R15*1)
- ADDQ $0x08, R15
- JMP copy_5_test
-
-copy_5:
- MOVUPS (R14)(R15*1), X0
- MOVUPS X0, (BX)(R15*1)
- ADDQ $0x10, R15
-
-copy_5_test:
- CMPQ R15, R11
- JB copy_5
+ MOVQ R11, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(BX)(R15*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ R11, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ R11, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(R11*1), BP
+ MOVB R15, (BX)
+ MOVB BP, -1(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (BX)
+ MOVB BP, 2(BX)
+ ADDQ R11, R14
ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(R11*1), BP
+ MOVL R15, (BX)
+ MOVL BP, -4(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(R11*1), BP
+ MOVQ R15, (BX)
+ MOVQ BP, -8(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+
+copy_5_end:
ADDQ R11, DI
SUBQ R11, R13
// Copy match from the current buffer
copy_match:
- TESTQ R13, R13
- JZ handle_loop
- MOVQ BX, R11
- SUBQ R12, R11
+ MOVQ BX, R11
+ SUBQ R12, R11
// ml <= mo
CMPQ R13, R12
@@ -1382,45 +1410,67 @@ main_loop:
// Copy literals
TESTQ R11, R11
JZ check_offset
- XORQ R14, R14
- TESTQ $0x00000001, R11
- JZ copy_1_word
- MOVB (SI)(R14*1), R15
- MOVB R15, (BX)(R14*1)
- ADDQ $0x01, R14
-
-copy_1_word:
- TESTQ $0x00000002, R11
- JZ copy_1_dword
- MOVW (SI)(R14*1), R15
- MOVW R15, (BX)(R14*1)
- ADDQ $0x02, R14
-
-copy_1_dword:
- TESTQ $0x00000004, R11
- JZ copy_1_qword
- MOVL (SI)(R14*1), R15
- MOVL R15, (BX)(R14*1)
- ADDQ $0x04, R14
-
-copy_1_qword:
- TESTQ $0x00000008, R11
- JZ copy_1_test
- MOVQ (SI)(R14*1), R15
- MOVQ R15, (BX)(R14*1)
- ADDQ $0x08, R14
- JMP copy_1_test
+ MOVQ R11, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop:
+ MOVUPS (SI), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, SI
+ ADDQ $0x10, BX
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(SI)(R14*1), SI
+ LEAQ 16(BX)(R14*1), BX
+ MOVUPS -16(SI), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ R11, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ R11, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (SI), R14
+ MOVB -1(SI)(R11*1), R15
+ MOVB R14, (BX)
+ MOVB R15, -1(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
-copy_1:
- MOVUPS (SI)(R14*1), X0
- MOVUPS X0, (BX)(R14*1)
- ADDQ $0x10, R14
+copy_1_move_3:
+ MOVW (SI), R14
+ MOVB 2(SI), R15
+ MOVW R14, (BX)
+ MOVB R15, 2(BX)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
-copy_1_test:
- CMPQ R14, R11
- JB copy_1
+copy_1_move_4through7:
+ MOVL (SI), R14
+ MOVL -4(SI)(R11*1), R15
+ MOVL R14, (BX)
+ MOVL R15, -4(BX)(R11*1)
ADDQ R11, SI
ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_8through16:
+ MOVQ (SI), R14
+ MOVQ -8(SI)(R11*1), R15
+ MOVQ R14, (BX)
+ MOVQ R15, -8(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+
+copy_1_end:
ADDQ R11, DI
// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
@@ -1432,52 +1482,65 @@ check_offset:
JG error_match_off_too_big
// Copy match from history
- MOVQ R12, R11
- SUBQ DI, R11
- JLS copy_match
- MOVQ R9, R14
- SUBQ R11, R14
- CMPQ R13, R11
- JGE copy_all_from_history
- XORQ R11, R11
- TESTQ $0x00000001, R13
- JZ copy_4_word
- MOVB (R14)(R11*1), R12
- MOVB R12, (BX)(R11*1)
- ADDQ $0x01, R11
-
-copy_4_word:
- TESTQ $0x00000002, R13
- JZ copy_4_dword
- MOVW (R14)(R11*1), R12
- MOVW R12, (BX)(R11*1)
- ADDQ $0x02, R11
-
-copy_4_dword:
- TESTQ $0x00000004, R13
- JZ copy_4_qword
- MOVL (R14)(R11*1), R12
- MOVL R12, (BX)(R11*1)
- ADDQ $0x04, R11
-
-copy_4_qword:
- TESTQ $0x00000008, R13
- JZ copy_4_test
- MOVQ (R14)(R11*1), R12
- MOVQ R12, (BX)(R11*1)
- ADDQ $0x08, R11
- JMP copy_4_test
-
-copy_4:
- MOVUPS (R14)(R11*1), X0
- MOVUPS X0, (BX)(R11*1)
- ADDQ $0x10, R11
+ MOVQ R12, R11
+ SUBQ DI, R11
+ JLS copy_match
+ MOVQ R9, R14
+ SUBQ R11, R14
+ CMPQ R13, R11
+ JG copy_all_from_history
+ MOVQ R13, R11
+ SUBQ $0x10, R11
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R11
+ JAE copy_4_loop
+ LEAQ 16(R14)(R11*1), R14
+ LEAQ 16(BX)(R11*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), R11
+ MOVB 2(R14), R12
+ MOVW R11, (BX)
+ MOVB R12, 2(BX)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), R11
+ MOVL -4(R14)(R13*1), R12
+ MOVL R11, (BX)
+ MOVL R12, -4(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), R11
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ R11, (BX)
+ MOVQ R12, -8(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
-copy_4_test:
- CMPQ R11, R13
- JB copy_4
+copy_4_end:
ADDQ R13, DI
- ADDQ R13, BX
ADDQ $0x18, AX
INCQ DX
CMPQ DX, CX
@@ -1485,99 +1548,143 @@ copy_4_test:
JMP loop_finished
copy_all_from_history:
- XORQ R15, R15
- TESTQ $0x00000001, R11
- JZ copy_5_word
- MOVB (R14)(R15*1), BP
- MOVB BP, (BX)(R15*1)
- ADDQ $0x01, R15
-
-copy_5_word:
- TESTQ $0x00000002, R11
- JZ copy_5_dword
- MOVW (R14)(R15*1), BP
- MOVW BP, (BX)(R15*1)
- ADDQ $0x02, R15
-
-copy_5_dword:
- TESTQ $0x00000004, R11
- JZ copy_5_qword
- MOVL (R14)(R15*1), BP
- MOVL BP, (BX)(R15*1)
- ADDQ $0x04, R15
-
-copy_5_qword:
- TESTQ $0x00000008, R11
- JZ copy_5_test
- MOVQ (R14)(R15*1), BP
- MOVQ BP, (BX)(R15*1)
- ADDQ $0x08, R15
- JMP copy_5_test
-
-copy_5:
- MOVUPS (R14)(R15*1), X0
- MOVUPS X0, (BX)(R15*1)
- ADDQ $0x10, R15
-
-copy_5_test:
- CMPQ R15, R11
- JB copy_5
+ MOVQ R11, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(BX)(R15*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ R11, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ R11, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(R11*1), BP
+ MOVB R15, (BX)
+ MOVB BP, -1(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (BX)
+ MOVB BP, 2(BX)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(R11*1), BP
+ MOVL R15, (BX)
+ MOVL BP, -4(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(R11*1), BP
+ MOVQ R15, (BX)
+ MOVQ BP, -8(BX)(R11*1)
+ ADDQ R11, R14
ADDQ R11, BX
+
+copy_5_end:
ADDQ R11, DI
SUBQ R11, R13
// Copy match from the current buffer
copy_match:
- TESTQ R13, R13
- JZ handle_loop
- MOVQ BX, R11
- SUBQ R12, R11
+ MOVQ BX, R11
+ SUBQ R12, R11
// ml <= mo
CMPQ R13, R12
JA copy_overlapping_match
// Copy non-overlapping match
- ADDQ R13, DI
- XORQ R12, R12
- TESTQ $0x00000001, R13
- JZ copy_2_word
- MOVB (R11)(R12*1), R14
- MOVB R14, (BX)(R12*1)
- ADDQ $0x01, R12
-
-copy_2_word:
- TESTQ $0x00000002, R13
- JZ copy_2_dword
- MOVW (R11)(R12*1), R14
- MOVW R14, (BX)(R12*1)
- ADDQ $0x02, R12
-
-copy_2_dword:
- TESTQ $0x00000004, R13
- JZ copy_2_qword
- MOVL (R11)(R12*1), R14
- MOVL R14, (BX)(R12*1)
- ADDQ $0x04, R12
-
-copy_2_qword:
- TESTQ $0x00000008, R13
- JZ copy_2_test
- MOVQ (R11)(R12*1), R14
- MOVQ R14, (BX)(R12*1)
- ADDQ $0x08, R12
- JMP copy_2_test
+ ADDQ R13, DI
+ MOVQ R13, R12
+ SUBQ $0x10, R12
+ JB copy_2_small
-copy_2:
- MOVUPS (R11)(R12*1), X0
- MOVUPS X0, (BX)(R12*1)
- ADDQ $0x10, R12
+copy_2_loop:
+ MOVUPS (R11), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R11
+ ADDQ $0x10, BX
+ SUBQ $0x10, R12
+ JAE copy_2_loop
+ LEAQ 16(R11)(R12*1), R11
+ LEAQ 16(BX)(R12*1), BX
+ MOVUPS -16(R11), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (R11), R12
+ MOVB -1(R11)(R13*1), R14
+ MOVB R12, (BX)
+ MOVB R14, -1(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
-copy_2_test:
- CMPQ R12, R13
- JB copy_2
+copy_2_move_3:
+ MOVW (R11), R12
+ MOVB 2(R11), R14
+ MOVW R12, (BX)
+ MOVB R14, 2(BX)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (R11), R12
+ MOVL -4(R11)(R13*1), R14
+ MOVL R12, (BX)
+ MOVL R14, -4(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (R11), R12
+ MOVQ -8(R11)(R13*1), R14
+ MOVQ R12, (BX)
+ MOVQ R14, -8(BX)(R13*1)
+ ADDQ R13, R11
ADDQ R13, BX
- JMP handle_loop
+
+copy_2_end:
+ JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
@@ -1773,18 +1880,17 @@ sequenceDecs_decodeSync_amd64_fill_2_end:
MOVBQZX DI, R13
SHRQ $0x10, DI
MOVWQZX DI, DI
- CMPQ R13, $0x00
- JZ sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero
- MOVQ BX, CX
- ADDQ R13, BX
+ LEAQ (BX)(R13*1), CX
MOVQ DX, R14
- SHLQ CL, R14
- MOVQ R13, CX
- NEGQ CX
- SHRQ CL, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
ADDQ R14, DI
-sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero:
// Load ctx.llTable
MOVQ ctx+16(FP), CX
MOVQ (CX), CX
@@ -1794,18 +1900,17 @@ sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero:
MOVBQZX R8, R13
SHRQ $0x10, R8
MOVWQZX R8, R8
- CMPQ R13, $0x00
- JZ sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero
- MOVQ BX, CX
- ADDQ R13, BX
+ LEAQ (BX)(R13*1), CX
MOVQ DX, R14
- SHLQ CL, R14
- MOVQ R13, CX
- NEGQ CX
- SHRQ CL, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
ADDQ R14, R8
-sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero:
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX
@@ -1815,18 +1920,17 @@ sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero:
MOVBQZX R9, R13
SHRQ $0x10, R9
MOVWQZX R9, R9
- CMPQ R13, $0x00
- JZ sequenceDecs_decodeSync_amd64_ofState_updateState_skip_zero
- MOVQ BX, CX
- ADDQ R13, BX
+ LEAQ (BX)(R13*1), CX
MOVQ DX, R14
- SHLQ CL, R14
- MOVQ R13, CX
- NEGQ CX
- SHRQ CL, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
ADDQ R14, R9
-sequenceDecs_decodeSync_amd64_ofState_updateState_skip_zero:
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
MOVQ 48(CX), CX
@@ -1934,103 +2038,137 @@ check_offset:
JG error_match_off_too_big
// Copy match from history
- MOVQ CX, AX
- SUBQ R12, AX
- JLS copy_match
- MOVQ 48(SP), R14
- SUBQ AX, R14
- CMPQ R13, AX
- JGE copy_all_from_history
- XORQ AX, AX
- TESTQ $0x00000001, R13
- JZ copy_4_word
- MOVB (R14)(AX*1), CL
- MOVB CL, (R10)(AX*1)
- ADDQ $0x01, AX
-
-copy_4_word:
- TESTQ $0x00000002, R13
- JZ copy_4_dword
- MOVW (R14)(AX*1), CX
- MOVW CX, (R10)(AX*1)
- ADDQ $0x02, AX
-
-copy_4_dword:
- TESTQ $0x00000004, R13
- JZ copy_4_qword
- MOVL (R14)(AX*1), CX
- MOVL CX, (R10)(AX*1)
- ADDQ $0x04, AX
-
-copy_4_qword:
- TESTQ $0x00000008, R13
- JZ copy_4_test
- MOVQ (R14)(AX*1), CX
- MOVQ CX, (R10)(AX*1)
- ADDQ $0x08, AX
- JMP copy_4_test
-
-copy_4:
- MOVUPS (R14)(AX*1), X0
- MOVUPS X0, (R10)(AX*1)
- ADDQ $0x10, AX
+ MOVQ CX, AX
+ SUBQ R12, AX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ AX, R14
+ CMPQ R13, AX
+ JG copy_all_from_history
+ MOVQ R13, AX
+ SUBQ $0x10, AX
+ JB copy_4_small
-copy_4_test:
- CMPQ AX, R13
- JB copy_4
- ADDQ R13, R12
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, AX
+ JAE copy_4_loop
+ LEAQ 16(R14)(AX*1), R14
+ LEAQ 16(R10)(AX*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), AX
+ MOVB 2(R14), CL
+ MOVW AX, (R10)
+ MOVB CL, 2(R10)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), AX
+ MOVL -4(R14)(R13*1), CX
+ MOVL AX, (R10)
+ MOVL CX, -4(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), AX
+ MOVQ -8(R14)(R13*1), CX
+ MOVQ AX, (R10)
+ MOVQ CX, -8(R10)(R13*1)
+ ADDQ R13, R14
ADDQ R13, R10
+
+copy_4_end:
+ ADDQ R13, R12
JMP handle_loop
JMP loop_finished
copy_all_from_history:
- XORQ R15, R15
- TESTQ $0x00000001, AX
- JZ copy_5_word
- MOVB (R14)(R15*1), BP
- MOVB BP, (R10)(R15*1)
- ADDQ $0x01, R15
-
-copy_5_word:
- TESTQ $0x00000002, AX
- JZ copy_5_dword
- MOVW (R14)(R15*1), BP
- MOVW BP, (R10)(R15*1)
- ADDQ $0x02, R15
-
-copy_5_dword:
- TESTQ $0x00000004, AX
- JZ copy_5_qword
- MOVL (R14)(R15*1), BP
- MOVL BP, (R10)(R15*1)
- ADDQ $0x04, R15
-
-copy_5_qword:
- TESTQ $0x00000008, AX
- JZ copy_5_test
- MOVQ (R14)(R15*1), BP
- MOVQ BP, (R10)(R15*1)
- ADDQ $0x08, R15
- JMP copy_5_test
-
-copy_5:
- MOVUPS (R14)(R15*1), X0
- MOVUPS X0, (R10)(R15*1)
- ADDQ $0x10, R15
-
-copy_5_test:
- CMPQ R15, AX
- JB copy_5
+ MOVQ AX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R10)(R15*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ AX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ AX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(AX*1), BP
+ MOVB R15, (R10)
+ MOVB BP, -1(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R10)
+ MOVB BP, 2(R10)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(AX*1), BP
+ MOVL R15, (R10)
+ MOVL BP, -4(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(AX*1), BP
+ MOVQ R15, (R10)
+ MOVQ BP, -8(R10)(AX*1)
+ ADDQ AX, R14
ADDQ AX, R10
+
+copy_5_end:
ADDQ AX, R12
SUBQ AX, R13
// Copy match from the current buffer
copy_match:
- TESTQ R13, R13
- JZ handle_loop
- MOVQ R10, AX
- SUBQ CX, AX
+ MOVQ R10, AX
+ SUBQ CX, AX
// ml <= mo
CMPQ R13, CX
@@ -2407,103 +2545,137 @@ check_offset:
JG error_match_off_too_big
// Copy match from history
- MOVQ R12, CX
- SUBQ R11, CX
- JLS copy_match
- MOVQ 48(SP), R14
- SUBQ CX, R14
- CMPQ R13, CX
- JGE copy_all_from_history
- XORQ CX, CX
- TESTQ $0x00000001, R13
- JZ copy_4_word
- MOVB (R14)(CX*1), R12
- MOVB R12, (R9)(CX*1)
- ADDQ $0x01, CX
-
-copy_4_word:
- TESTQ $0x00000002, R13
- JZ copy_4_dword
- MOVW (R14)(CX*1), R12
- MOVW R12, (R9)(CX*1)
- ADDQ $0x02, CX
-
-copy_4_dword:
- TESTQ $0x00000004, R13
- JZ copy_4_qword
- MOVL (R14)(CX*1), R12
- MOVL R12, (R9)(CX*1)
- ADDQ $0x04, CX
-
-copy_4_qword:
- TESTQ $0x00000008, R13
- JZ copy_4_test
- MOVQ (R14)(CX*1), R12
- MOVQ R12, (R9)(CX*1)
- ADDQ $0x08, CX
- JMP copy_4_test
-
-copy_4:
- MOVUPS (R14)(CX*1), X0
- MOVUPS X0, (R9)(CX*1)
- ADDQ $0x10, CX
+ MOVQ R12, CX
+ SUBQ R11, CX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ CX, R14
+ CMPQ R13, CX
+ JG copy_all_from_history
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, CX
+ JAE copy_4_loop
+ LEAQ 16(R14)(CX*1), R14
+ LEAQ 16(R9)(CX*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), CX
+ MOVB 2(R14), R12
+ MOVW CX, (R9)
+ MOVB R12, 2(R9)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), CX
+ MOVL -4(R14)(R13*1), R12
+ MOVL CX, (R9)
+ MOVL R12, -4(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), CX
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ CX, (R9)
+ MOVQ R12, -8(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
-copy_4_test:
- CMPQ CX, R13
- JB copy_4
+copy_4_end:
ADDQ R13, R11
- ADDQ R13, R9
JMP handle_loop
JMP loop_finished
copy_all_from_history:
- XORQ R15, R15
- TESTQ $0x00000001, CX
- JZ copy_5_word
- MOVB (R14)(R15*1), BP
- MOVB BP, (R9)(R15*1)
- ADDQ $0x01, R15
-
-copy_5_word:
- TESTQ $0x00000002, CX
- JZ copy_5_dword
- MOVW (R14)(R15*1), BP
- MOVW BP, (R9)(R15*1)
- ADDQ $0x02, R15
-
-copy_5_dword:
- TESTQ $0x00000004, CX
- JZ copy_5_qword
- MOVL (R14)(R15*1), BP
- MOVL BP, (R9)(R15*1)
- ADDQ $0x04, R15
-
-copy_5_qword:
- TESTQ $0x00000008, CX
- JZ copy_5_test
- MOVQ (R14)(R15*1), BP
- MOVQ BP, (R9)(R15*1)
- ADDQ $0x08, R15
- JMP copy_5_test
-
-copy_5:
- MOVUPS (R14)(R15*1), X0
- MOVUPS X0, (R9)(R15*1)
- ADDQ $0x10, R15
-
-copy_5_test:
- CMPQ R15, CX
- JB copy_5
+ MOVQ CX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R9)(R15*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ CX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ CX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(CX*1), BP
+ MOVB R15, (R9)
+ MOVB BP, -1(R9)(CX*1)
+ ADDQ CX, R14
ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R9)
+ MOVB BP, 2(R9)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(CX*1), BP
+ MOVL R15, (R9)
+ MOVL BP, -4(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(CX*1), BP
+ MOVQ R15, (R9)
+ MOVQ BP, -8(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+
+copy_5_end:
ADDQ CX, R11
SUBQ CX, R13
// Copy match from the current buffer
copy_match:
- TESTQ R13, R13
- JZ handle_loop
- MOVQ R9, CX
- SUBQ R12, CX
+ MOVQ R9, CX
+ SUBQ R12, CX
// ml <= mo
CMPQ R13, R12
@@ -2746,18 +2918,17 @@ sequenceDecs_decodeSync_safe_amd64_fill_2_end:
MOVBQZX DI, R13
SHRQ $0x10, DI
MOVWQZX DI, DI
- CMPQ R13, $0x00
- JZ sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero
- MOVQ BX, CX
- ADDQ R13, BX
+ LEAQ (BX)(R13*1), CX
MOVQ DX, R14
- SHLQ CL, R14
- MOVQ R13, CX
- NEGQ CX
- SHRQ CL, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
ADDQ R14, DI
-sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero:
// Load ctx.llTable
MOVQ ctx+16(FP), CX
MOVQ (CX), CX
@@ -2767,18 +2938,17 @@ sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero:
MOVBQZX R8, R13
SHRQ $0x10, R8
MOVWQZX R8, R8
- CMPQ R13, $0x00
- JZ sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero
- MOVQ BX, CX
- ADDQ R13, BX
+ LEAQ (BX)(R13*1), CX
MOVQ DX, R14
- SHLQ CL, R14
- MOVQ R13, CX
- NEGQ CX
- SHRQ CL, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
ADDQ R14, R8
-sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero:
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX
@@ -2788,18 +2958,17 @@ sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero:
MOVBQZX R9, R13
SHRQ $0x10, R9
MOVWQZX R9, R9
- CMPQ R13, $0x00
- JZ sequenceDecs_decodeSync_safe_amd64_ofState_updateState_skip_zero
- MOVQ BX, CX
- ADDQ R13, BX
+ LEAQ (BX)(R13*1), CX
MOVQ DX, R14
- SHLQ CL, R14
- MOVQ R13, CX
- NEGQ CX
- SHRQ CL, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
ADDQ R14, R9
-sequenceDecs_decodeSync_safe_amd64_ofState_updateState_skip_zero:
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
MOVQ 48(CX), CX
@@ -2885,45 +3054,67 @@ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok:
// Copy literals
TESTQ AX, AX
JZ check_offset
- XORQ R14, R14
- TESTQ $0x00000001, AX
- JZ copy_1_word
- MOVB (R11)(R14*1), R15
- MOVB R15, (R10)(R14*1)
- ADDQ $0x01, R14
-
-copy_1_word:
- TESTQ $0x00000002, AX
- JZ copy_1_dword
- MOVW (R11)(R14*1), R15
- MOVW R15, (R10)(R14*1)
- ADDQ $0x02, R14
-
-copy_1_dword:
- TESTQ $0x00000004, AX
- JZ copy_1_qword
- MOVL (R11)(R14*1), R15
- MOVL R15, (R10)(R14*1)
- ADDQ $0x04, R14
-
-copy_1_qword:
- TESTQ $0x00000008, AX
- JZ copy_1_test
- MOVQ (R11)(R14*1), R15
- MOVQ R15, (R10)(R14*1)
- ADDQ $0x08, R14
- JMP copy_1_test
+ MOVQ AX, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
-copy_1:
- MOVUPS (R11)(R14*1), X0
- MOVUPS X0, (R10)(R14*1)
- ADDQ $0x10, R14
+copy_1_loop:
+ MOVUPS (R11), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R11
+ ADDQ $0x10, R10
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(R11)(R14*1), R11
+ LEAQ 16(R10)(R14*1), R10
+ MOVUPS -16(R11), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ AX, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ AX, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (R11), R14
+ MOVB -1(R11)(AX*1), R15
+ MOVB R14, (R10)
+ MOVB R15, -1(R10)(AX*1)
+ ADDQ AX, R11
+ ADDQ AX, R10
+ JMP copy_1_end
+
+copy_1_move_3:
+ MOVW (R11), R14
+ MOVB 2(R11), R15
+ MOVW R14, (R10)
+ MOVB R15, 2(R10)
+ ADDQ AX, R11
+ ADDQ AX, R10
+ JMP copy_1_end
+
+copy_1_move_4through7:
+ MOVL (R11), R14
+ MOVL -4(R11)(AX*1), R15
+ MOVL R14, (R10)
+ MOVL R15, -4(R10)(AX*1)
+ ADDQ AX, R11
+ ADDQ AX, R10
+ JMP copy_1_end
-copy_1_test:
- CMPQ R14, AX
- JB copy_1
+copy_1_move_8through16:
+ MOVQ (R11), R14
+ MOVQ -8(R11)(AX*1), R15
+ MOVQ R14, (R10)
+ MOVQ R15, -8(R10)(AX*1)
ADDQ AX, R11
ADDQ AX, R10
+
+copy_1_end:
ADDQ AX, R12
	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
@@ -2936,149 +3127,206 @@ check_offset:
JG error_match_off_too_big
// Copy match from history
- MOVQ CX, AX
- SUBQ R12, AX
- JLS copy_match
- MOVQ 48(SP), R14
- SUBQ AX, R14
- CMPQ R13, AX
- JGE copy_all_from_history
- XORQ AX, AX
- TESTQ $0x00000001, R13
- JZ copy_4_word
- MOVB (R14)(AX*1), CL
- MOVB CL, (R10)(AX*1)
- ADDQ $0x01, AX
-
-copy_4_word:
- TESTQ $0x00000002, R13
- JZ copy_4_dword
- MOVW (R14)(AX*1), CX
- MOVW CX, (R10)(AX*1)
- ADDQ $0x02, AX
-
-copy_4_dword:
- TESTQ $0x00000004, R13
- JZ copy_4_qword
- MOVL (R14)(AX*1), CX
- MOVL CX, (R10)(AX*1)
- ADDQ $0x04, AX
-
-copy_4_qword:
- TESTQ $0x00000008, R13
- JZ copy_4_test
- MOVQ (R14)(AX*1), CX
- MOVQ CX, (R10)(AX*1)
- ADDQ $0x08, AX
- JMP copy_4_test
-
-copy_4:
- MOVUPS (R14)(AX*1), X0
- MOVUPS X0, (R10)(AX*1)
- ADDQ $0x10, AX
+ MOVQ CX, AX
+ SUBQ R12, AX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ AX, R14
+ CMPQ R13, AX
+ JG copy_all_from_history
+ MOVQ R13, AX
+ SUBQ $0x10, AX
+ JB copy_4_small
-copy_4_test:
- CMPQ AX, R13
- JB copy_4
- ADDQ R13, R12
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, AX
+ JAE copy_4_loop
+ LEAQ 16(R14)(AX*1), R14
+ LEAQ 16(R10)(AX*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), AX
+ MOVB 2(R14), CL
+ MOVW AX, (R10)
+ MOVB CL, 2(R10)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), AX
+ MOVL -4(R14)(R13*1), CX
+ MOVL AX, (R10)
+ MOVL CX, -4(R10)(R13*1)
+ ADDQ R13, R14
ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), AX
+ MOVQ -8(R14)(R13*1), CX
+ MOVQ AX, (R10)
+ MOVQ CX, -8(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+
+copy_4_end:
+ ADDQ R13, R12
JMP handle_loop
JMP loop_finished
copy_all_from_history:
- XORQ R15, R15
- TESTQ $0x00000001, AX
- JZ copy_5_word
- MOVB (R14)(R15*1), BP
- MOVB BP, (R10)(R15*1)
- ADDQ $0x01, R15
-
-copy_5_word:
- TESTQ $0x00000002, AX
- JZ copy_5_dword
- MOVW (R14)(R15*1), BP
- MOVW BP, (R10)(R15*1)
- ADDQ $0x02, R15
-
-copy_5_dword:
- TESTQ $0x00000004, AX
- JZ copy_5_qword
- MOVL (R14)(R15*1), BP
- MOVL BP, (R10)(R15*1)
- ADDQ $0x04, R15
-
-copy_5_qword:
- TESTQ $0x00000008, AX
- JZ copy_5_test
- MOVQ (R14)(R15*1), BP
- MOVQ BP, (R10)(R15*1)
- ADDQ $0x08, R15
- JMP copy_5_test
-
-copy_5:
- MOVUPS (R14)(R15*1), X0
- MOVUPS X0, (R10)(R15*1)
- ADDQ $0x10, R15
-
-copy_5_test:
- CMPQ R15, AX
- JB copy_5
+ MOVQ AX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R10)(R15*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ AX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ AX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(AX*1), BP
+ MOVB R15, (R10)
+ MOVB BP, -1(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R10)
+ MOVB BP, 2(R10)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(AX*1), BP
+ MOVL R15, (R10)
+ MOVL BP, -4(R10)(AX*1)
+ ADDQ AX, R14
ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(AX*1), BP
+ MOVQ R15, (R10)
+ MOVQ BP, -8(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+
+copy_5_end:
ADDQ AX, R12
SUBQ AX, R13
// Copy match from the current buffer
copy_match:
- TESTQ R13, R13
- JZ handle_loop
- MOVQ R10, AX
- SUBQ CX, AX
+ MOVQ R10, AX
+ SUBQ CX, AX
// ml <= mo
CMPQ R13, CX
JA copy_overlapping_match
// Copy non-overlapping match
- ADDQ R13, R12
- XORQ CX, CX
- TESTQ $0x00000001, R13
- JZ copy_2_word
- MOVB (AX)(CX*1), R14
- MOVB R14, (R10)(CX*1)
- ADDQ $0x01, CX
-
-copy_2_word:
- TESTQ $0x00000002, R13
- JZ copy_2_dword
- MOVW (AX)(CX*1), R14
- MOVW R14, (R10)(CX*1)
- ADDQ $0x02, CX
-
-copy_2_dword:
- TESTQ $0x00000004, R13
- JZ copy_2_qword
- MOVL (AX)(CX*1), R14
- MOVL R14, (R10)(CX*1)
- ADDQ $0x04, CX
-
-copy_2_qword:
- TESTQ $0x00000008, R13
- JZ copy_2_test
- MOVQ (AX)(CX*1), R14
- MOVQ R14, (R10)(CX*1)
- ADDQ $0x08, CX
- JMP copy_2_test
-
-copy_2:
- MOVUPS (AX)(CX*1), X0
- MOVUPS X0, (R10)(CX*1)
- ADDQ $0x10, CX
+ ADDQ R13, R12
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_2_small
-copy_2_test:
- CMPQ CX, R13
- JB copy_2
+copy_2_loop:
+ MOVUPS (AX), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, AX
+ ADDQ $0x10, R10
+ SUBQ $0x10, CX
+ JAE copy_2_loop
+ LEAQ 16(AX)(CX*1), AX
+ LEAQ 16(R10)(CX*1), R10
+ MOVUPS -16(AX), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (AX), CL
+ MOVB -1(AX)(R13*1), R14
+ MOVB CL, (R10)
+ MOVB R14, -1(R10)(R13*1)
+ ADDQ R13, AX
ADDQ R13, R10
- JMP handle_loop
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (AX), CX
+ MOVB 2(AX), R14
+ MOVW CX, (R10)
+ MOVB R14, 2(R10)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (AX), CX
+ MOVL -4(AX)(R13*1), R14
+ MOVL CX, (R10)
+ MOVL R14, -4(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (AX), CX
+ MOVQ -8(AX)(R13*1), R14
+ MOVQ CX, (R10)
+ MOVQ R14, -8(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+
+copy_2_end:
+ JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
@@ -3415,45 +3663,67 @@ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok:
// Copy literals
TESTQ CX, CX
JZ check_offset
- XORQ R14, R14
- TESTQ $0x00000001, CX
- JZ copy_1_word
- MOVB (R10)(R14*1), R15
- MOVB R15, (R9)(R14*1)
- ADDQ $0x01, R14
-
-copy_1_word:
- TESTQ $0x00000002, CX
- JZ copy_1_dword
- MOVW (R10)(R14*1), R15
- MOVW R15, (R9)(R14*1)
- ADDQ $0x02, R14
-
-copy_1_dword:
- TESTQ $0x00000004, CX
- JZ copy_1_qword
- MOVL (R10)(R14*1), R15
- MOVL R15, (R9)(R14*1)
- ADDQ $0x04, R14
-
-copy_1_qword:
- TESTQ $0x00000008, CX
- JZ copy_1_test
- MOVQ (R10)(R14*1), R15
- MOVQ R15, (R9)(R14*1)
- ADDQ $0x08, R14
- JMP copy_1_test
+ MOVQ CX, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop:
+ MOVUPS (R10), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R10
+ ADDQ $0x10, R9
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(R10)(R14*1), R10
+ LEAQ 16(R9)(R14*1), R9
+ MOVUPS -16(R10), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ CX, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ CX, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (R10), R14
+ MOVB -1(R10)(CX*1), R15
+ MOVB R14, (R9)
+ MOVB R15, -1(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
-copy_1:
- MOVUPS (R10)(R14*1), X0
- MOVUPS X0, (R9)(R14*1)
- ADDQ $0x10, R14
+copy_1_move_3:
+ MOVW (R10), R14
+ MOVB 2(R10), R15
+ MOVW R14, (R9)
+ MOVB R15, 2(R9)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_4through7:
+ MOVL (R10), R14
+ MOVL -4(R10)(CX*1), R15
+ MOVL R14, (R9)
+ MOVL R15, -4(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
-copy_1_test:
- CMPQ R14, CX
- JB copy_1
+copy_1_move_8through16:
+ MOVQ (R10), R14
+ MOVQ -8(R10)(CX*1), R15
+ MOVQ R14, (R9)
+ MOVQ R15, -8(R9)(CX*1)
ADDQ CX, R10
ADDQ CX, R9
+
+copy_1_end:
ADDQ CX, R11
	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
@@ -3466,149 +3736,206 @@ check_offset:
JG error_match_off_too_big
// Copy match from history
- MOVQ R12, CX
- SUBQ R11, CX
- JLS copy_match
- MOVQ 48(SP), R14
- SUBQ CX, R14
- CMPQ R13, CX
- JGE copy_all_from_history
- XORQ CX, CX
- TESTQ $0x00000001, R13
- JZ copy_4_word
- MOVB (R14)(CX*1), R12
- MOVB R12, (R9)(CX*1)
- ADDQ $0x01, CX
-
-copy_4_word:
- TESTQ $0x00000002, R13
- JZ copy_4_dword
- MOVW (R14)(CX*1), R12
- MOVW R12, (R9)(CX*1)
- ADDQ $0x02, CX
-
-copy_4_dword:
- TESTQ $0x00000004, R13
- JZ copy_4_qword
- MOVL (R14)(CX*1), R12
- MOVL R12, (R9)(CX*1)
- ADDQ $0x04, CX
-
-copy_4_qword:
- TESTQ $0x00000008, R13
- JZ copy_4_test
- MOVQ (R14)(CX*1), R12
- MOVQ R12, (R9)(CX*1)
- ADDQ $0x08, CX
- JMP copy_4_test
-
-copy_4:
- MOVUPS (R14)(CX*1), X0
- MOVUPS X0, (R9)(CX*1)
- ADDQ $0x10, CX
+ MOVQ R12, CX
+ SUBQ R11, CX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ CX, R14
+ CMPQ R13, CX
+ JG copy_all_from_history
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, CX
+ JAE copy_4_loop
+ LEAQ 16(R14)(CX*1), R14
+ LEAQ 16(R9)(CX*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), CX
+ MOVB 2(R14), R12
+ MOVW CX, (R9)
+ MOVB R12, 2(R9)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), CX
+ MOVL -4(R14)(R13*1), R12
+ MOVL CX, (R9)
+ MOVL R12, -4(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), CX
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ CX, (R9)
+ MOVQ R12, -8(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
-copy_4_test:
- CMPQ CX, R13
- JB copy_4
+copy_4_end:
ADDQ R13, R11
- ADDQ R13, R9
JMP handle_loop
JMP loop_finished
copy_all_from_history:
- XORQ R15, R15
- TESTQ $0x00000001, CX
- JZ copy_5_word
- MOVB (R14)(R15*1), BP
- MOVB BP, (R9)(R15*1)
- ADDQ $0x01, R15
-
-copy_5_word:
- TESTQ $0x00000002, CX
- JZ copy_5_dword
- MOVW (R14)(R15*1), BP
- MOVW BP, (R9)(R15*1)
- ADDQ $0x02, R15
-
-copy_5_dword:
- TESTQ $0x00000004, CX
- JZ copy_5_qword
- MOVL (R14)(R15*1), BP
- MOVL BP, (R9)(R15*1)
- ADDQ $0x04, R15
-
-copy_5_qword:
- TESTQ $0x00000008, CX
- JZ copy_5_test
- MOVQ (R14)(R15*1), BP
- MOVQ BP, (R9)(R15*1)
- ADDQ $0x08, R15
- JMP copy_5_test
-
-copy_5:
- MOVUPS (R14)(R15*1), X0
- MOVUPS X0, (R9)(R15*1)
- ADDQ $0x10, R15
-
-copy_5_test:
- CMPQ R15, CX
- JB copy_5
+ MOVQ CX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R9)(R15*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ CX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ CX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(CX*1), BP
+ MOVB R15, (R9)
+ MOVB BP, -1(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R9)
+ MOVB BP, 2(R9)
+ ADDQ CX, R14
ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(CX*1), BP
+ MOVL R15, (R9)
+ MOVL BP, -4(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(CX*1), BP
+ MOVQ R15, (R9)
+ MOVQ BP, -8(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+
+copy_5_end:
ADDQ CX, R11
SUBQ CX, R13
// Copy match from the current buffer
copy_match:
- TESTQ R13, R13
- JZ handle_loop
- MOVQ R9, CX
- SUBQ R12, CX
+ MOVQ R9, CX
+ SUBQ R12, CX
// ml <= mo
CMPQ R13, R12
JA copy_overlapping_match
// Copy non-overlapping match
- ADDQ R13, R11
- XORQ R12, R12
- TESTQ $0x00000001, R13
- JZ copy_2_word
- MOVB (CX)(R12*1), R14
- MOVB R14, (R9)(R12*1)
- ADDQ $0x01, R12
-
-copy_2_word:
- TESTQ $0x00000002, R13
- JZ copy_2_dword
- MOVW (CX)(R12*1), R14
- MOVW R14, (R9)(R12*1)
- ADDQ $0x02, R12
-
-copy_2_dword:
- TESTQ $0x00000004, R13
- JZ copy_2_qword
- MOVL (CX)(R12*1), R14
- MOVL R14, (R9)(R12*1)
- ADDQ $0x04, R12
-
-copy_2_qword:
- TESTQ $0x00000008, R13
- JZ copy_2_test
- MOVQ (CX)(R12*1), R14
- MOVQ R14, (R9)(R12*1)
- ADDQ $0x08, R12
- JMP copy_2_test
-
-copy_2:
- MOVUPS (CX)(R12*1), X0
- MOVUPS X0, (R9)(R12*1)
- ADDQ $0x10, R12
+ ADDQ R13, R11
+ MOVQ R13, R12
+ SUBQ $0x10, R12
+ JB copy_2_small
-copy_2_test:
- CMPQ R12, R13
- JB copy_2
+copy_2_loop:
+ MOVUPS (CX), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, CX
+ ADDQ $0x10, R9
+ SUBQ $0x10, R12
+ JAE copy_2_loop
+ LEAQ 16(CX)(R12*1), CX
+ LEAQ 16(R9)(R12*1), R9
+ MOVUPS -16(CX), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (CX), R12
+ MOVB -1(CX)(R13*1), R14
+ MOVB R12, (R9)
+ MOVB R14, -1(R9)(R13*1)
+ ADDQ R13, CX
ADDQ R13, R9
- JMP handle_loop
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (CX), R12
+ MOVB 2(CX), R14
+ MOVW R12, (R9)
+ MOVB R14, 2(R9)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (CX), R12
+ MOVL -4(CX)(R13*1), R14
+ MOVL R12, (R9)
+ MOVL R14, -4(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (CX), R12
+ MOVQ -8(CX)(R13*1), R14
+ MOVQ R12, (R9)
+ MOVQ R14, -8(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+
+copy_2_end:
+ JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
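
The rewritten copy_1/copy_2/copy_4/copy_5 blocks above replace the old bit-tested byte/word/dword/qword loops with two ideas: lengths of 16 or more are moved as whole MOVUPS blocks plus one overlapping 16-byte tail that ends exactly at the destination's end, and lengths below 16 are dispatched to a size class (1-2, 3, 4-7, 8-16) that issues a head move plus a possibly overlapping tail move. The llState/mlState/ofState updates likewise drop the old nBits==0 skip branch in favour of a rotate-and-mask bit read. The Go below is a minimal sketch of both ideas with invented names; it is illustration only, not the generated assembly.

package sketch

import (
	"encoding/binary"
	"math/bits"
)

// overlapCopy mirrors the copy_*_loop / copy_*_small structure. src and dst
// are assumed not to overlap (overlapping matches take a separate path).
func overlapCopy(dst, src []byte, n int) {
	switch {
	case n >= 16:
		i := 0
		for ; n-i >= 16; i += 16 { // copy_*_loop: full 16-byte blocks
			copy(dst[i:i+16], src[i:i+16])
		}
		copy(dst[n-16:n], src[n-16:n]) // one overlapping tail block
	case n >= 8: // copy_*_move_8through16: qword head + overlapping qword tail
		binary.LittleEndian.PutUint64(dst[0:8], binary.LittleEndian.Uint64(src[0:8]))
		binary.LittleEndian.PutUint64(dst[n-8:n], binary.LittleEndian.Uint64(src[n-8:n]))
	case n >= 4: // copy_*_move_4through7: dword head + overlapping dword tail
		binary.LittleEndian.PutUint32(dst[0:4], binary.LittleEndian.Uint32(src[0:4]))
		binary.LittleEndian.PutUint32(dst[n-4:n], binary.LittleEndian.Uint32(src[n-4:n]))
	case n == 3: // copy_*_move_3: word + trailing byte
		dst[0], dst[1], dst[2] = src[0], src[1], src[2]
	case n >= 1: // copy_*_move_1or2: head byte + possibly overlapping tail byte
		dst[0] = src[0]
		dst[n-1] = src[n-1]
	}
}

// readBits mirrors the new LEAQ/ROLQ/mask state update: rotating the bit
// buffer left by off+n brings the wanted field into the low bits, and the
// mask (1<<n)-1 extracts it; when n == 0 the mask is zero, so the old
// CMPQ/JZ skip branch is unnecessary (n < 64 is assumed, as in the decoder).
func readBits(buf uint64, off, n uint) (value uint64, newOff uint) {
	newOff = off + n
	value = bits.RotateLeft64(buf, int(newOff)) & (uint64(1)<<n - 1)
	return value, newOff
}
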
diff --git a/vendor/github.com/letsencrypt/boulder/LICENSE.txt b/vendor/github.com/letsencrypt/boulder/LICENSE.txt
new file mode 100644
index 000000000..fa274d92d
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/LICENSE.txt
@@ -0,0 +1,375 @@
+Copyright 2016 ISRG. All rights reserved.
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/letsencrypt/boulder/core/challenges.go b/vendor/github.com/letsencrypt/boulder/core/challenges.go
new file mode 100644
index 000000000..4b4a67c48
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/core/challenges.go
@@ -0,0 +1,27 @@
+package core
+
+func newChallenge(challengeType AcmeChallenge, token string) Challenge {
+ return Challenge{
+ Type: challengeType,
+ Status: StatusPending,
+ Token: token,
+ }
+}
+
+// HTTPChallenge01 constructs a pending http-01 challenge from the provided
+// token; newChallenge does not generate tokens, so the caller must supply one.
+func HTTPChallenge01(token string) Challenge {
+ return newChallenge(ChallengeTypeHTTP01, token)
+}
+
+// DNSChallenge01 constructs a pending dns-01 challenge from the provided
+// token; as with HTTPChallenge01, the caller must supply the token.
+func DNSChallenge01(token string) Challenge {
+ return newChallenge(ChallengeTypeDNS01, token)
+}
+
+// TLSALPNChallenge01 constructs a pending tls-alpn-01 challenge from the
+// provided token; as with HTTPChallenge01, the caller must supply the token.
+func TLSALPNChallenge01(token string) Challenge {
+ return newChallenge(ChallengeTypeTLSALPN01, token)
+}
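
A hypothetical caller of the constructors above, paired with Challenge.ExpectedKeyAuthorization from core/objects.go later in this diff; the token literal, the function name, and the accountKey parameter are illustrative only, and the sketch assumes nothing beyond the code shown in these vendored files.

package sketch

import (
	"fmt"

	"github.com/letsencrypt/boulder/core"
	jose "gopkg.in/square/go-jose.v2"
)

func serveHTTP01(accountKey *jose.JSONWebKey) error {
	// The caller supplies the token; newChallenge does not generate one.
	ch := core.HTTPChallenge01("illustrative-token-value")
	keyAuthz, err := ch.ExpectedKeyAuthorization(accountKey)
	if err != nil {
		return err
	}
	// An http-01 responder would serve keyAuthz at
	// /.well-known/acme-challenge/<token> (RFC 8555, section 8.3).
	fmt.Println(keyAuthz)
	return nil
}
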
diff --git a/vendor/github.com/letsencrypt/boulder/core/interfaces.go b/vendor/github.com/letsencrypt/boulder/core/interfaces.go
new file mode 100644
index 000000000..85cdc9a49
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/core/interfaces.go
@@ -0,0 +1,14 @@
+package core
+
+import (
+ "github.com/letsencrypt/boulder/identifier"
+)
+
+// PolicyAuthority defines the public interface for the Boulder PA
+// TODO(#5891): Move this interface to a more appropriate location.
+type PolicyAuthority interface {
+ WillingToIssue(domain identifier.ACMEIdentifier) error
+ WillingToIssueWildcards(identifiers []identifier.ACMEIdentifier) error
+ ChallengesFor(domain identifier.ACMEIdentifier) ([]Challenge, error)
+ ChallengeTypeEnabled(t AcmeChallenge) bool
+}
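
A hypothetical no-op implementation of the interface above, to show its shape; Boulder's real policy authority lives outside this vendored subset, and every name here is invented. Challenge, AcmeChallenge, and HTTPChallenge01 come from the core package files added in this diff.

package sketch

import (
	"github.com/letsencrypt/boulder/core"
	"github.com/letsencrypt/boulder/identifier"
)

// permissivePA accepts every identifier and offers only http-01.
type permissivePA struct{}

func (permissivePA) WillingToIssue(domain identifier.ACMEIdentifier) error { return nil }

func (permissivePA) WillingToIssueWildcards(idents []identifier.ACMEIdentifier) error {
	return nil
}

func (permissivePA) ChallengesFor(domain identifier.ACMEIdentifier) ([]core.Challenge, error) {
	// The token must be supplied by the caller of HTTPChallenge01.
	return []core.Challenge{core.HTTPChallenge01("illustrative-token-value")}, nil
}

func (permissivePA) ChallengeTypeEnabled(t core.AcmeChallenge) bool {
	return t == core.ChallengeTypeHTTP01
}

// Compile-time check that the stub satisfies core.PolicyAuthority.
var _ core.PolicyAuthority = permissivePA{}
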
diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go
new file mode 100644
index 000000000..9e328e823
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/core/objects.go
@@ -0,0 +1,536 @@
+package core
+
+import (
+ "crypto"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "hash/fnv"
+ "net"
+ "strings"
+ "time"
+
+ "gopkg.in/square/go-jose.v2"
+
+ "github.com/letsencrypt/boulder/identifier"
+ "github.com/letsencrypt/boulder/probs"
+ "github.com/letsencrypt/boulder/revocation"
+)
+
+// AcmeStatus defines the state of a given authorization
+type AcmeStatus string
+
+// These statuses are the states of authorizations, challenges, and registrations
+const (
+ StatusUnknown = AcmeStatus("unknown") // Unknown status; the default
+ StatusPending = AcmeStatus("pending") // In process; client has next action
+ StatusProcessing = AcmeStatus("processing") // In process; server has next action
+ StatusReady = AcmeStatus("ready") // Order is ready for finalization
+ StatusValid = AcmeStatus("valid") // Object is valid
+ StatusInvalid = AcmeStatus("invalid") // Validation failed
+ StatusRevoked = AcmeStatus("revoked") // Object no longer valid
+ StatusDeactivated = AcmeStatus("deactivated") // Object has been deactivated
+)
+
+// AcmeResource values identify different types of ACME resources
+type AcmeResource string
+
+// The types of ACME resources
+const (
+ ResourceNewReg = AcmeResource("new-reg")
+ ResourceNewAuthz = AcmeResource("new-authz")
+ ResourceNewCert = AcmeResource("new-cert")
+ ResourceRevokeCert = AcmeResource("revoke-cert")
+ ResourceRegistration = AcmeResource("reg")
+ ResourceChallenge = AcmeResource("challenge")
+ ResourceAuthz = AcmeResource("authz")
+ ResourceKeyChange = AcmeResource("key-change")
+)
+
+// AcmeChallenge values identify different types of ACME challenges
+type AcmeChallenge string
+
+// These types are the available challenges
+// TODO(#5009): Make this a custom type as well.
+const (
+ ChallengeTypeHTTP01 = AcmeChallenge("http-01")
+ ChallengeTypeDNS01 = AcmeChallenge("dns-01")
+ ChallengeTypeTLSALPN01 = AcmeChallenge("tls-alpn-01")
+)
+
+// IsValid tests whether the challenge is a known challenge
+func (c AcmeChallenge) IsValid() bool {
+ switch c {
+ case ChallengeTypeHTTP01, ChallengeTypeDNS01, ChallengeTypeTLSALPN01:
+ return true
+ default:
+ return false
+ }
+}
+
+// OCSPStatus defines the state of OCSP for a domain
+type OCSPStatus string
+
+// These statuses are the states of OCSP
+const (
+ OCSPStatusGood = OCSPStatus("good")
+ OCSPStatusRevoked = OCSPStatus("revoked")
+)
+
+// DNSPrefix is attached to DNS names in DNS challenges
+const DNSPrefix = "_acme-challenge"
+
+// CertificateRequest is just a CSR
+//
+// This data is unmarshalled from JSON by way of RawCertificateRequest, which
+// represents the actual structure received from the client.
+type CertificateRequest struct {
+ CSR *x509.CertificateRequest // The CSR
+ Bytes []byte // The original bytes of the CSR, for logging.
+}
+
+type RawCertificateRequest struct {
+ CSR JSONBuffer `json:"csr"` // The encoded CSR
+}
+
+// UnmarshalJSON provides an implementation for decoding CertificateRequest objects.
+func (cr *CertificateRequest) UnmarshalJSON(data []byte) error {
+ var raw RawCertificateRequest
+ err := json.Unmarshal(data, &raw)
+ if err != nil {
+ return err
+ }
+
+ csr, err := x509.ParseCertificateRequest(raw.CSR)
+ if err != nil {
+ return err
+ }
+
+ cr.CSR = csr
+ cr.Bytes = raw.CSR
+ return nil
+}
+
+// MarshalJSON provides an implementation for encoding CertificateRequest objects.
+func (cr CertificateRequest) MarshalJSON() ([]byte, error) {
+ return json.Marshal(RawCertificateRequest{
+ CSR: cr.CSR.Raw,
+ })
+}
+
+// Registration objects represent non-public metadata attached
+// to account keys.
+type Registration struct {
+ // Unique identifier
+ ID int64 `json:"id,omitempty" db:"id"`
+
+ // Account key to which the details are attached
+ Key *jose.JSONWebKey `json:"key"`
+
+ // Contact URIs
+ Contact *[]string `json:"contact,omitempty"`
+
+ // Agreement with terms of service
+ Agreement string `json:"agreement,omitempty"`
+
+ // InitialIP is the IP address from which the registration was created
+ InitialIP net.IP `json:"initialIp"`
+
+ // CreatedAt is the time the registration was created.
+ CreatedAt *time.Time `json:"createdAt,omitempty"`
+
+ Status AcmeStatus `json:"status"`
+}
+
+// ValidationRecord represents a validation attempt against a specific URL/hostname
+// and the IP addresses that were resolved and used
+type ValidationRecord struct {
+ // SimpleHTTP only
+ URL string `json:"url,omitempty"`
+
+ // Shared
+ Hostname string `json:"hostname"`
+ Port string `json:"port,omitempty"`
+ AddressesResolved []net.IP `json:"addressesResolved,omitempty"`
+ AddressUsed net.IP `json:"addressUsed,omitempty"`
+ // AddressesTried contains a list of addresses tried before the `AddressUsed`.
+ // Presently this will only ever be one IP from `AddressesResolved` since the
+ // only retry is in the case of a v6 failure with one v4 fallback. E.g. if
+ // a record with `AddressesResolved: { 127.0.0.1, ::1 }` were processed for
+ // a challenge validation with the IPv6 first flag on and the ::1 address
+ // failed but the 127.0.0.1 retry succeeded then the record would end up
+ // being:
+ // {
+ // ...
+ // AddressesResolved: [ 127.0.0.1, ::1 ],
+ // AddressUsed: 127.0.0.1
+ // AddressesTried: [ ::1 ],
+ // ...
+ // }
+ AddressesTried []net.IP `json:"addressesTried,omitempty"`
+
+ // OldTLS is true if any request in the validation chain used HTTPS and negotiated
+ // a TLS version lower than 1.2.
+ // TODO(#6011): Remove once TLS 1.0 and 1.1 support is gone.
+ OldTLS bool `json:"oldTLS,omitempty"`
+}
+
+func looksLikeKeyAuthorization(str string) error {
+ parts := strings.Split(str, ".")
+ if len(parts) != 2 {
+ return fmt.Errorf("Invalid key authorization: does not look like a key authorization")
+ } else if !LooksLikeAToken(parts[0]) {
+ return fmt.Errorf("Invalid key authorization: malformed token")
+ } else if !LooksLikeAToken(parts[1]) {
+ // Thumbprints have the same syntax as tokens in boulder
+ // Both are base64-encoded and 32 octets
+ return fmt.Errorf("Invalid key authorization: malformed key thumbprint")
+ }
+ return nil
+}
+
+// Challenge is an aggregate of all data needed for any challenges.
+//
+// Rather than define individual types for different types of
+// challenge, we just throw all the elements into one bucket,
+// together with the common metadata elements.
+type Challenge struct {
+ // The type of challenge
+ Type AcmeChallenge `json:"type"`
+
+ // The status of this challenge
+ Status AcmeStatus `json:"status,omitempty"`
+
+ // Contains the error that occurred during challenge validation, if any
+ Error *probs.ProblemDetails `json:"error,omitempty"`
+
+ // A URI to which a response can be POSTed
+ URI string `json:"uri,omitempty"`
+
+ // For the V2 API the "URI" field is deprecated in favour of URL.
+ URL string `json:"url,omitempty"`
+
+ // Used by http-01, tls-sni-01, tls-alpn-01 and dns-01 challenges
+ Token string `json:"token,omitempty"`
+
+ // The expected KeyAuthorization for validation of the challenge. Populated by
+ // the RA prior to passing the challenge to the VA. For legacy reasons this
+ // field is called "ProvidedKeyAuthorization" because it was initially set by
+ // the content of the challenge update POST from the client. It is no longer
+ // set that way and should be renamed to "KeyAuthorization".
+ // TODO(@cpu): Rename `ProvidedKeyAuthorization` to `KeyAuthorization`.
+ ProvidedKeyAuthorization string `json:"keyAuthorization,omitempty"`
+
+ // Contains information about URLs used or redirected to and IPs resolved and
+ // used
+ ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"`
+ // The time at which the server validated the challenge. Required by
+ // RFC8555 if status is valid.
+ Validated *time.Time `json:"validated,omitempty"`
+}
+
+// ExpectedKeyAuthorization computes the expected KeyAuthorization value for
+// the challenge.
+func (ch Challenge) ExpectedKeyAuthorization(key *jose.JSONWebKey) (string, error) {
+ if key == nil {
+ return "", fmt.Errorf("Cannot authorize a nil key")
+ }
+
+ thumbprint, err := key.Thumbprint(crypto.SHA256)
+ if err != nil {
+ return "", err
+ }
+
+ return ch.Token + "." + base64.RawURLEncoding.EncodeToString(thumbprint), nil
+}
+
+// RecordsSane checks the sanity of a ValidationRecord object before sending it
+// back to the RA to be stored.
+func (ch Challenge) RecordsSane() bool {
+	if len(ch.ValidationRecord) == 0 {
+ return false
+ }
+
+ switch ch.Type {
+ case ChallengeTypeHTTP01:
+ for _, rec := range ch.ValidationRecord {
+ if rec.URL == "" || rec.Hostname == "" || rec.Port == "" || rec.AddressUsed == nil ||
+ len(rec.AddressesResolved) == 0 {
+ return false
+ }
+ }
+ case ChallengeTypeTLSALPN01:
+ if len(ch.ValidationRecord) > 1 {
+ return false
+ }
+ if ch.ValidationRecord[0].URL != "" {
+ return false
+ }
+ if ch.ValidationRecord[0].Hostname == "" || ch.ValidationRecord[0].Port == "" ||
+ ch.ValidationRecord[0].AddressUsed == nil || len(ch.ValidationRecord[0].AddressesResolved) == 0 {
+ return false
+ }
+ case ChallengeTypeDNS01:
+ if len(ch.ValidationRecord) > 1 {
+ return false
+ }
+ if ch.ValidationRecord[0].Hostname == "" {
+ return false
+ }
+ return true
+ default: // Unsupported challenge type
+ return false
+ }
+
+ return true
+}
+
+// CheckConsistencyForClientOffer checks the fields of a challenge object before it is
+// given to the client.
+func (ch Challenge) CheckConsistencyForClientOffer() error {
+ err := ch.checkConsistency()
+ if err != nil {
+ return err
+ }
+
+ // Before completion, the key authorization field should be empty
+ if ch.ProvidedKeyAuthorization != "" {
+ return fmt.Errorf("A response to this challenge was already submitted.")
+ }
+ return nil
+}
+
+// CheckConsistencyForValidation checks the fields of a challenge object before it is
+// given to the VA.
+func (ch Challenge) CheckConsistencyForValidation() error {
+ err := ch.checkConsistency()
+ if err != nil {
+ return err
+ }
+
+ // If the challenge is completed, then there should be a key authorization
+ return looksLikeKeyAuthorization(ch.ProvidedKeyAuthorization)
+}
+
+// checkConsistency checks the sanity of a challenge object before it is issued to the client.
+func (ch Challenge) checkConsistency() error {
+ if ch.Status != StatusPending {
+ return fmt.Errorf("The challenge is not pending.")
+ }
+
+ // There always needs to be a token
+ if !LooksLikeAToken(ch.Token) {
+ return fmt.Errorf("The token is missing.")
+ }
+ return nil
+}
+
+// StringID is used to generate an ID for challenges associated with new-style
+// authorizations. This is necessary as these challenges no longer have a unique
+// non-sequential identifier in the new storage scheme. The identifier is
+// generated by constructing an FNV hash over the challenge token and type and
+// encoding the first 4 bytes of it in unpadded base64 URL encoding.
+func (ch Challenge) StringID() string {
+ h := fnv.New128a()
+ h.Write([]byte(ch.Token))
+ h.Write([]byte(ch.Type))
+ return base64.RawURLEncoding.EncodeToString(h.Sum(nil)[0:4])
+}
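+
+// For example (tok is an illustrative token value), two challenges sharing a
+// token but differing in type hash to distinct IDs:
+//
+//	a := Challenge{Type: ChallengeTypeHTTP01, Token: tok}
+//	b := Challenge{Type: ChallengeTypeDNS01, Token: tok}
+//	// a.StringID() != b.StringID(); each ID is 6 base64url characters,
+//	// since 4 bytes encode to ceil(32/6) = 6 characters without padding.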
+
+// Authorization represents the authorization of an account key holder
+// to act on behalf of a domain. This struct is intended to be used both
+// internally and for JSON marshaling on the wire. Any fields that should be
+// suppressed on the wire (e.g., ID, regID) must be made empty before marshaling.
+type Authorization struct {
+ // An identifier for this authorization, unique across
+ // authorizations and certificates within this instance.
+ ID string `json:"id,omitempty" db:"id"`
+
+ // The identifier for which authorization is being given
+ Identifier identifier.ACMEIdentifier `json:"identifier,omitempty" db:"identifier"`
+
+ // The registration ID associated with the authorization
+ RegistrationID int64 `json:"regId,omitempty" db:"registrationID"`
+
+ // The status of the validation of this authorization
+ Status AcmeStatus `json:"status,omitempty" db:"status"`
+
+	// The date after which this authorization will no
+	// longer be considered valid. Note: a certificate may be issued even on the
+ // last day of an authorization's lifetime. The last day for which someone can
+ // hold a valid certificate based on an authorization is authorization
+ // lifetime + certificate lifetime.
+ Expires *time.Time `json:"expires,omitempty" db:"expires"`
+
+	// An array of challenge objects used to validate the
+ // applicant's control of the identifier. For authorizations
+ // in process, these are challenges to be fulfilled; for
+ // final authorizations, they describe the evidence that
+ // the server used in support of granting the authorization.
+ //
+ // There should only ever be one challenge of each type in this
+ // slice and the order of these challenges may not be predictable.
+ Challenges []Challenge `json:"challenges,omitempty" db:"-"`
+
+ // This field is deprecated. It's filled in by WFE for the ACMEv1 API.
+ Combinations [][]int `json:"combinations,omitempty" db:"combinations"`
+
+ // Wildcard is a Boulder-specific Authorization field that indicates the
+ // authorization was created as a result of an order containing a name with
+	// a `*.` wildcard prefix. This will help convey to users that an
+ // Authorization with the identifier `example.com` and one DNS-01 challenge
+ // corresponds to a name `*.example.com` from an associated order.
+ Wildcard bool `json:"wildcard,omitempty" db:"-"`
+}
+
+// FindChallengeByStringID will look for a challenge matching the given ID inside
+// this authorization. If found, it will return the index of that challenge within
+// the Authorization's Challenges array. Otherwise it will return -1.
+func (authz *Authorization) FindChallengeByStringID(id string) int {
+ for i, c := range authz.Challenges {
+ if c.StringID() == id {
+ return i
+ }
+ }
+ return -1
+}
+
+// SolvedBy will look through the Authorization's challenges, returning the type
+// of the *first* challenge it finds with Status: valid, or an error if no
+// challenge is valid.
+func (authz *Authorization) SolvedBy() (*AcmeChallenge, error) {
+ if len(authz.Challenges) == 0 {
+ return nil, fmt.Errorf("Authorization has no challenges")
+ }
+ for _, chal := range authz.Challenges {
+ if chal.Status == StatusValid {
+ return &chal.Type, nil
+ }
+ }
+ return nil, fmt.Errorf("Authorization not solved by any challenge")
+}
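+
+// For instance, if an authorization's dns-01 challenge has Status StatusValid,
+// SolvedBy returns a pointer whose value equals ChallengeTypeDNS01, regardless
+// of any other, still-pending challenges:
+//
+//	t, err := authz.SolvedBy() // *t == ChallengeTypeDNS01, err == nil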
+
+// JSONBuffer fields get encoded and decoded JOSE-style, in base64url encoding
+// with stripped padding.
+type JSONBuffer []byte
+
+// URL-safe base64 encode that strips padding
+func base64URLEncode(data []byte) string {
+	result := base64.URLEncoding.EncodeToString(data)
+ return strings.TrimRight(result, "=")
+}
+
+// URL-safe base64 decoder that adds padding
+func base64URLDecode(data string) ([]byte, error) {
+	missing := (4 - len(data)%4) % 4
+ data += strings.Repeat("=", missing)
+ return base64.URLEncoding.DecodeString(data)
+}
+
+// MarshalJSON encodes a JSONBuffer for transmission.
+func (jb JSONBuffer) MarshalJSON() (result []byte, err error) {
+ return json.Marshal(base64URLEncode(jb))
+}
+
+// UnmarshalJSON decodes a JSONBuffer to an object.
+func (jb *JSONBuffer) UnmarshalJSON(data []byte) (err error) {
+ var str string
+ err = json.Unmarshal(data, &str)
+ if err != nil {
+ return err
+ }
+ *jb, err = base64URLDecode(str)
+ return
+}
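+
+// Round-trip sketch: a JSONBuffer marshals to a quoted, unpadded base64url
+// string and unmarshals back to the original bytes, e.g.:
+//
+//	out, _ := json.Marshal(JSONBuffer{0xff, 0xee}) // out == []byte(`"_-4"`)
+//	var jb JSONBuffer
+//	_ = json.Unmarshal(out, &jb) // jb == JSONBuffer{0xff, 0xee}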
+
+// Certificate objects are entirely internal to the server. The only
+// thing exposed on the wire is the certificate itself.
+type Certificate struct {
+ ID int64 `db:"id"`
+ RegistrationID int64 `db:"registrationID"`
+
+ Serial string `db:"serial"`
+ Digest string `db:"digest"`
+ DER []byte `db:"der"`
+ Issued time.Time `db:"issued"`
+ Expires time.Time `db:"expires"`
+}
+
+// CertificateStatus structs are internal to the server. They represent the
+// latest data about the status of the certificate, required for OCSP updating
+// and for validating that the subscriber has accepted the certificate.
+type CertificateStatus struct {
+ ID int64 `db:"id"`
+
+ Serial string `db:"serial"`
+
+ // status: 'good' or 'revoked'. Note that good, expired certificates remain
+ // with status 'good' but don't necessarily get fresh OCSP responses.
+ Status OCSPStatus `db:"status"`
+
+ // ocspLastUpdated: The date and time of the last time we generated an OCSP
+ // response. If we have never generated one, this has the zero value of
+ // time.Time, i.e. Jan 1 1970.
+ OCSPLastUpdated time.Time `db:"ocspLastUpdated"`
+
+ // revokedDate: If status is 'revoked', this is the date and time it was
+ // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970.
+ RevokedDate time.Time `db:"revokedDate"`
+
+ // revokedReason: If status is 'revoked', this is the reason code for the
+ // revocation. Otherwise it is zero (which happens to be the reason
+ // code for 'unspecified').
+ RevokedReason revocation.Reason `db:"revokedReason"`
+
+ LastExpirationNagSent time.Time `db:"lastExpirationNagSent"`
+
+ // The encoded and signed OCSP response.
+ OCSPResponse []byte `db:"ocspResponse"`
+
+ // For performance reasons[0] we duplicate the `Expires` field of the
+ // `Certificates` object/table in `CertificateStatus` to avoid a costly `JOIN`
+ // later on just to retrieve this `Time` value. This helps both the OCSP
+ // updater and the expiration-mailer stay performant.
+ //
+	// Similarly, we add an explicit `IsExpired` boolean to the `CertificateStatus`
+	// table so that the OCSP updater's queries can use a meaningful
+	// index on `(isExpired, ocspLastUpdated)` without a `JOIN` on `certificates`.
+ // For more detail see Boulder #1864[0].
+ //
+ // [0]: https://github.com/letsencrypt/boulder/issues/1864
+ NotAfter time.Time `db:"notAfter"`
+ IsExpired bool `db:"isExpired"`
+
+ // TODO(#5152): Change this to an issuance.Issuer(Name)ID after it no longer
+ // has to support both IssuerNameIDs and IssuerIDs.
+ IssuerID int64
+}
+
+// FQDNSet contains the SHA256 hash of the lowercased, comma joined dNSNames
+// contained in a certificate.
+type FQDNSet struct {
+ ID int64
+ SetHash []byte
+ Serial string
+ Issued time.Time
+ Expires time.Time
+}
+
+// SCTDERs is a convenience type
+type SCTDERs [][]byte
+
+// CertDER is a convenience type that helps differentiate what the
+// underlying byte slice contains
+type CertDER []byte
+
+// SuggestedWindow is a type exposed inside the RenewalInfo resource.
+type SuggestedWindow struct {
+ Start time.Time `json:"start"`
+ End time.Time `json:"end"`
+}
+
+// RenewalInfo is a type which is exposed to clients that query the renewalInfo
+// endpoint specified in draft-aaron-ari.
+type RenewalInfo struct {
+ SuggestedWindow SuggestedWindow `json:"suggestedWindow"`
+}
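+
+// On the wire a RenewalInfo response looks like (timestamps illustrative;
+// time.Time marshals as RFC 3339):
+//
+//	{"suggestedWindow":{"start":"2021-01-03T00:00:00Z","end":"2021-01-07T00:00:00Z"}}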
diff --git a/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go b/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go
new file mode 100644
index 000000000..3a9cc1036
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go
@@ -0,0 +1,1100 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.15.6
+// source: core.proto
+
+package proto
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Challenge struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+ Status string `protobuf:"bytes,6,opt,name=status,proto3" json:"status,omitempty"`
+ Uri string `protobuf:"bytes,9,opt,name=uri,proto3" json:"uri,omitempty"`
+ Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"`
+ KeyAuthorization string `protobuf:"bytes,5,opt,name=keyAuthorization,proto3" json:"keyAuthorization,omitempty"`
+ Validationrecords []*ValidationRecord `protobuf:"bytes,10,rep,name=validationrecords,proto3" json:"validationrecords,omitempty"`
+ Error *ProblemDetails `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"`
+ Validated int64 `protobuf:"varint,11,opt,name=validated,proto3" json:"validated,omitempty"`
+}
+
+func (x *Challenge) Reset() {
+ *x = Challenge{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Challenge) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Challenge) ProtoMessage() {}
+
+func (x *Challenge) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Challenge.ProtoReflect.Descriptor instead.
+func (*Challenge) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Challenge) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Challenge) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *Challenge) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+func (x *Challenge) GetUri() string {
+ if x != nil {
+ return x.Uri
+ }
+ return ""
+}
+
+func (x *Challenge) GetToken() string {
+ if x != nil {
+ return x.Token
+ }
+ return ""
+}
+
+func (x *Challenge) GetKeyAuthorization() string {
+ if x != nil {
+ return x.KeyAuthorization
+ }
+ return ""
+}
+
+func (x *Challenge) GetValidationrecords() []*ValidationRecord {
+ if x != nil {
+ return x.Validationrecords
+ }
+ return nil
+}
+
+func (x *Challenge) GetError() *ProblemDetails {
+ if x != nil {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Challenge) GetValidated() int64 {
+ if x != nil {
+ return x.Validated
+ }
+ return 0
+}
+
+type ValidationRecord struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"`
+ Port string `protobuf:"bytes,2,opt,name=port,proto3" json:"port,omitempty"`
+ AddressesResolved [][]byte `protobuf:"bytes,3,rep,name=addressesResolved,proto3" json:"addressesResolved,omitempty"` // net.IP.MarshalText()
+ AddressUsed []byte `protobuf:"bytes,4,opt,name=addressUsed,proto3" json:"addressUsed,omitempty"` // net.IP.MarshalText()
+ Authorities []string `protobuf:"bytes,5,rep,name=authorities,proto3" json:"authorities,omitempty"`
+ Url string `protobuf:"bytes,6,opt,name=url,proto3" json:"url,omitempty"`
+ // A list of addresses tried before the address used (see
+ // core/objects.go and the comment on the ValidationRecord structure
+	// definition for more information).
+ AddressesTried [][]byte `protobuf:"bytes,7,rep,name=addressesTried,proto3" json:"addressesTried,omitempty"` // net.IP.MarshalText()
+}
+
+func (x *ValidationRecord) Reset() {
+ *x = ValidationRecord{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidationRecord) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidationRecord) ProtoMessage() {}
+
+func (x *ValidationRecord) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidationRecord.ProtoReflect.Descriptor instead.
+func (*ValidationRecord) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ValidationRecord) GetHostname() string {
+ if x != nil {
+ return x.Hostname
+ }
+ return ""
+}
+
+func (x *ValidationRecord) GetPort() string {
+ if x != nil {
+ return x.Port
+ }
+ return ""
+}
+
+func (x *ValidationRecord) GetAddressesResolved() [][]byte {
+ if x != nil {
+ return x.AddressesResolved
+ }
+ return nil
+}
+
+func (x *ValidationRecord) GetAddressUsed() []byte {
+ if x != nil {
+ return x.AddressUsed
+ }
+ return nil
+}
+
+func (x *ValidationRecord) GetAuthorities() []string {
+ if x != nil {
+ return x.Authorities
+ }
+ return nil
+}
+
+func (x *ValidationRecord) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *ValidationRecord) GetAddressesTried() [][]byte {
+ if x != nil {
+ return x.AddressesTried
+ }
+ return nil
+}
+
+type ProblemDetails struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ProblemType string `protobuf:"bytes,1,opt,name=problemType,proto3" json:"problemType,omitempty"`
+ Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
+ HttpStatus int32 `protobuf:"varint,3,opt,name=httpStatus,proto3" json:"httpStatus,omitempty"`
+}
+
+func (x *ProblemDetails) Reset() {
+ *x = ProblemDetails{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ProblemDetails) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProblemDetails) ProtoMessage() {}
+
+func (x *ProblemDetails) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProblemDetails.ProtoReflect.Descriptor instead.
+func (*ProblemDetails) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ProblemDetails) GetProblemType() string {
+ if x != nil {
+ return x.ProblemType
+ }
+ return ""
+}
+
+func (x *ProblemDetails) GetDetail() string {
+ if x != nil {
+ return x.Detail
+ }
+ return ""
+}
+
+func (x *ProblemDetails) GetHttpStatus() int32 {
+ if x != nil {
+ return x.HttpStatus
+ }
+ return 0
+}
+
+type Certificate struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"`
+ Digest string `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"`
+ Der []byte `protobuf:"bytes,4,opt,name=der,proto3" json:"der,omitempty"`
+ Issued int64 `protobuf:"varint,5,opt,name=issued,proto3" json:"issued,omitempty"` // Unix timestamp (nanoseconds)
+ Expires int64 `protobuf:"varint,6,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *Certificate) Reset() {
+ *x = Certificate{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Certificate) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Certificate) ProtoMessage() {}
+
+func (x *Certificate) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Certificate.ProtoReflect.Descriptor instead.
+func (*Certificate) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Certificate) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *Certificate) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+func (x *Certificate) GetDigest() string {
+ if x != nil {
+ return x.Digest
+ }
+ return ""
+}
+
+func (x *Certificate) GetDer() []byte {
+ if x != nil {
+ return x.Der
+ }
+ return nil
+}
+
+func (x *Certificate) GetIssued() int64 {
+ if x != nil {
+ return x.Issued
+ }
+ return 0
+}
+
+func (x *Certificate) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+type CertificateStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
+ Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+ OcspLastUpdated int64 `protobuf:"varint,4,opt,name=ocspLastUpdated,proto3" json:"ocspLastUpdated,omitempty"`
+ RevokedDate int64 `protobuf:"varint,5,opt,name=revokedDate,proto3" json:"revokedDate,omitempty"`
+ RevokedReason int64 `protobuf:"varint,6,opt,name=revokedReason,proto3" json:"revokedReason,omitempty"`
+ LastExpirationNagSent int64 `protobuf:"varint,7,opt,name=lastExpirationNagSent,proto3" json:"lastExpirationNagSent,omitempty"`
+ OcspResponse []byte `protobuf:"bytes,8,opt,name=ocspResponse,proto3" json:"ocspResponse,omitempty"`
+ NotAfter int64 `protobuf:"varint,9,opt,name=notAfter,proto3" json:"notAfter,omitempty"`
+ IsExpired bool `protobuf:"varint,10,opt,name=isExpired,proto3" json:"isExpired,omitempty"`
+ IssuerID int64 `protobuf:"varint,11,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
+}
+
+func (x *CertificateStatus) Reset() {
+ *x = CertificateStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CertificateStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CertificateStatus) ProtoMessage() {}
+
+func (x *CertificateStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CertificateStatus.ProtoReflect.Descriptor instead.
+func (*CertificateStatus) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *CertificateStatus) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+func (x *CertificateStatus) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+func (x *CertificateStatus) GetOcspLastUpdated() int64 {
+ if x != nil {
+ return x.OcspLastUpdated
+ }
+ return 0
+}
+
+func (x *CertificateStatus) GetRevokedDate() int64 {
+ if x != nil {
+ return x.RevokedDate
+ }
+ return 0
+}
+
+func (x *CertificateStatus) GetRevokedReason() int64 {
+ if x != nil {
+ return x.RevokedReason
+ }
+ return 0
+}
+
+func (x *CertificateStatus) GetLastExpirationNagSent() int64 {
+ if x != nil {
+ return x.LastExpirationNagSent
+ }
+ return 0
+}
+
+func (x *CertificateStatus) GetOcspResponse() []byte {
+ if x != nil {
+ return x.OcspResponse
+ }
+ return nil
+}
+
+func (x *CertificateStatus) GetNotAfter() int64 {
+ if x != nil {
+ return x.NotAfter
+ }
+ return 0
+}
+
+func (x *CertificateStatus) GetIsExpired() bool {
+ if x != nil {
+ return x.IsExpired
+ }
+ return false
+}
+
+func (x *CertificateStatus) GetIssuerID() int64 {
+ if x != nil {
+ return x.IssuerID
+ }
+ return 0
+}
+
+type Registration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ Contact []string `protobuf:"bytes,3,rep,name=contact,proto3" json:"contact,omitempty"`
+ ContactsPresent bool `protobuf:"varint,4,opt,name=contactsPresent,proto3" json:"contactsPresent,omitempty"`
+ Agreement string `protobuf:"bytes,5,opt,name=agreement,proto3" json:"agreement,omitempty"`
+ InitialIP []byte `protobuf:"bytes,6,opt,name=initialIP,proto3" json:"initialIP,omitempty"`
+ CreatedAt int64 `protobuf:"varint,7,opt,name=createdAt,proto3" json:"createdAt,omitempty"` // Unix timestamp (nanoseconds)
+ Status string `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"`
+}
+
+func (x *Registration) Reset() {
+ *x = Registration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Registration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Registration) ProtoMessage() {}
+
+func (x *Registration) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Registration.ProtoReflect.Descriptor instead.
+func (*Registration) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Registration) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Registration) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *Registration) GetContact() []string {
+ if x != nil {
+ return x.Contact
+ }
+ return nil
+}
+
+func (x *Registration) GetContactsPresent() bool {
+ if x != nil {
+ return x.ContactsPresent
+ }
+ return false
+}
+
+func (x *Registration) GetAgreement() string {
+ if x != nil {
+ return x.Agreement
+ }
+ return ""
+}
+
+func (x *Registration) GetInitialIP() []byte {
+ if x != nil {
+ return x.InitialIP
+ }
+ return nil
+}
+
+func (x *Registration) GetCreatedAt() int64 {
+ if x != nil {
+ return x.CreatedAt
+ }
+ return 0
+}
+
+func (x *Registration) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+type Authorization struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Identifier string `protobuf:"bytes,2,opt,name=identifier,proto3" json:"identifier,omitempty"`
+ RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
+ Expires int64 `protobuf:"varint,5,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
+ Challenges []*Challenge `protobuf:"bytes,6,rep,name=challenges,proto3" json:"challenges,omitempty"`
+}
+
+func (x *Authorization) Reset() {
+ *x = Authorization{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Authorization) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Authorization) ProtoMessage() {}
+
+func (x *Authorization) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Authorization.ProtoReflect.Descriptor instead.
+func (*Authorization) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Authorization) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *Authorization) GetIdentifier() string {
+ if x != nil {
+ return x.Identifier
+ }
+ return ""
+}
+
+func (x *Authorization) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *Authorization) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+func (x *Authorization) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+func (x *Authorization) GetChallenges() []*Challenge {
+ if x != nil {
+ return x.Challenges
+ }
+ return nil
+}
+
+type Order struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Expires int64 `protobuf:"varint,3,opt,name=expires,proto3" json:"expires,omitempty"`
+ Error *ProblemDetails `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
+ CertificateSerial string `protobuf:"bytes,5,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"`
+ Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"`
+ Names []string `protobuf:"bytes,8,rep,name=names,proto3" json:"names,omitempty"`
+ BeganProcessing bool `protobuf:"varint,9,opt,name=beganProcessing,proto3" json:"beganProcessing,omitempty"`
+ Created int64 `protobuf:"varint,10,opt,name=created,proto3" json:"created,omitempty"`
+ V2Authorizations []int64 `protobuf:"varint,11,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"`
+}
+
+func (x *Order) Reset() {
+ *x = Order{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Order) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Order) ProtoMessage() {}
+
+func (x *Order) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Order.ProtoReflect.Descriptor instead.
+func (*Order) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *Order) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Order) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *Order) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+func (x *Order) GetError() *ProblemDetails {
+ if x != nil {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Order) GetCertificateSerial() string {
+ if x != nil {
+ return x.CertificateSerial
+ }
+ return ""
+}
+
+func (x *Order) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+func (x *Order) GetNames() []string {
+ if x != nil {
+ return x.Names
+ }
+ return nil
+}
+
+func (x *Order) GetBeganProcessing() bool {
+ if x != nil {
+ return x.BeganProcessing
+ }
+ return false
+}
+
+func (x *Order) GetCreated() int64 {
+ if x != nil {
+ return x.Created
+ }
+ return 0
+}
+
+func (x *Order) GetV2Authorizations() []int64 {
+ if x != nil {
+ return x.V2Authorizations
+ }
+ return nil
+}
+
+var File_core_proto protoreflect.FileDescriptor
+
+var file_core_proto_rawDesc = []byte{
+ 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x6f,
+ 0x72, 0x65, 0x22, 0xab, 0x02, 0x0a, 0x09, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65,
+ 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64,
+ 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03,
+ 0x75, 0x72, 0x69, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x14,
+ 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10,
+ 0x6b, 0x65, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x44, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x72, 0x65,
+ 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63,
+ 0x6f, 0x72, 0x64, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x72,
+ 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f,
+ 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x22, 0xee, 0x01, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c,
+ 0x52, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c,
+ 0x76, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x55, 0x73,
+ 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
+ 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, 0x65, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28,
+ 0x0c, 0x52, 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, 0x65,
+ 0x64, 0x22, 0x6a, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x54, 0x79,
+ 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65,
+ 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xa9, 0x01,
+ 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a,
+ 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a,
+ 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64,
+ 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65,
+ 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12,
+ 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x22, 0xeb, 0x02, 0x0a, 0x11, 0x43, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
+ 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
+ 0x28, 0x0a, 0x0f, 0x6f, 0x63, 0x73, 0x70, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x6f, 0x63, 0x73, 0x70, 0x4c, 0x61,
+ 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x76,
+ 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b,
+ 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x72,
+ 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f,
+ 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4e, 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f,
+ 0x63, 0x73, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6e,
+ 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e,
+ 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x45, 0x78, 0x70,
+ 0x69, 0x72, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x45, 0x78,
+ 0x70, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49,
+ 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49,
+ 0x44, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0xe6, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f,
+ 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73,
+ 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x12, 0x1c,
+ 0x0a, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09,
+ 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x50, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x09, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x22, 0xd6, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02,
+ 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69,
+ 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x0a,
+ 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67,
+ 0x65, 0x52, 0x0a, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x4a, 0x04, 0x08,
+ 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0xd7, 0x02, 0x0a, 0x05, 0x4f, 0x72,
+ 0x64, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67,
+ 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x65,
+ 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78,
+ 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62,
+ 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x12, 0x2c, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12,
+ 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73,
+ 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x28, 0x0a,
+ 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f,
+ 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, 0x41,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08,
+ 0x06, 0x10, 0x07, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f,
+ 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_core_proto_rawDescOnce sync.Once
+ file_core_proto_rawDescData = file_core_proto_rawDesc
+)
+
+func file_core_proto_rawDescGZIP() []byte {
+ file_core_proto_rawDescOnce.Do(func() {
+ file_core_proto_rawDescData = protoimpl.X.CompressGZIP(file_core_proto_rawDescData)
+ })
+ return file_core_proto_rawDescData
+}
+
+var file_core_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_core_proto_goTypes = []interface{}{
+ (*Challenge)(nil), // 0: core.Challenge
+ (*ValidationRecord)(nil), // 1: core.ValidationRecord
+ (*ProblemDetails)(nil), // 2: core.ProblemDetails
+ (*Certificate)(nil), // 3: core.Certificate
+ (*CertificateStatus)(nil), // 4: core.CertificateStatus
+ (*Registration)(nil), // 5: core.Registration
+ (*Authorization)(nil), // 6: core.Authorization
+ (*Order)(nil), // 7: core.Order
+}
+var file_core_proto_depIdxs = []int32{
+ 1, // 0: core.Challenge.validationrecords:type_name -> core.ValidationRecord
+ 2, // 1: core.Challenge.error:type_name -> core.ProblemDetails
+ 0, // 2: core.Authorization.challenges:type_name -> core.Challenge
+ 2, // 3: core.Order.error:type_name -> core.ProblemDetails
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_core_proto_init() }
+func file_core_proto_init() {
+ if File_core_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_core_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Challenge); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidationRecord); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ProblemDetails); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Certificate); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CertificateStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Registration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Authorization); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Order); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_core_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 8,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_core_proto_goTypes,
+ DependencyIndexes: file_core_proto_depIdxs,
+ MessageInfos: file_core_proto_msgTypes,
+ }.Build()
+ File_core_proto = out.File
+ file_core_proto_rawDesc = nil
+ file_core_proto_goTypes = nil
+ file_core_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/letsencrypt/boulder/core/proto/core.proto b/vendor/github.com/letsencrypt/boulder/core/proto/core.proto
new file mode 100644
index 000000000..06abe5e99
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/core/proto/core.proto
@@ -0,0 +1,95 @@
+syntax = "proto3";
+
+package core;
+option go_package = "github.com/letsencrypt/boulder/core/proto";
+
+message Challenge {
+ int64 id = 1;
+ string type = 2;
+ string status = 6;
+ string uri = 9;
+ string token = 3;
+ string keyAuthorization = 5;
+ repeated ValidationRecord validationrecords = 10;
+ ProblemDetails error = 7;
+ int64 validated = 11;
+}
+
+message ValidationRecord {
+ string hostname = 1;
+ string port = 2;
+ repeated bytes addressesResolved = 3; // net.IP.MarshalText()
+ bytes addressUsed = 4; // net.IP.MarshalText()
+
+ repeated string authorities = 5;
+ string url = 6;
+ // A list of addresses tried before the address used (see
+ // core/objects.go and the comment on the ValidationRecord structure
+ // definition for more information).
+ repeated bytes addressesTried = 7; // net.IP.MarshalText()
+}
+
+message ProblemDetails {
+ string problemType = 1;
+ string detail = 2;
+ int32 httpStatus = 3;
+}
+
+message Certificate {
+ int64 registrationID = 1;
+ string serial = 2;
+ string digest = 3;
+ bytes der = 4;
+ int64 issued = 5; // Unix timestamp (nanoseconds)
+ int64 expires = 6; // Unix timestamp (nanoseconds)
+}
+
+message CertificateStatus {
+ string serial = 1;
+ reserved 2; // previously subscriberApproved
+ string status = 3;
+ int64 ocspLastUpdated = 4;
+ int64 revokedDate = 5;
+ int64 revokedReason = 6;
+ int64 lastExpirationNagSent = 7;
+ bytes ocspResponse = 8;
+ int64 notAfter = 9;
+ bool isExpired = 10;
+ int64 issuerID = 11;
+}
+
+message Registration {
+ int64 id = 1;
+ bytes key = 2;
+ repeated string contact = 3;
+ bool contactsPresent = 4;
+ string agreement = 5;
+ bytes initialIP = 6;
+ int64 createdAt = 7; // Unix timestamp (nanoseconds)
+ string status = 8;
+}
+
+message Authorization {
+ string id = 1;
+ string identifier = 2;
+ int64 registrationID = 3;
+ string status = 4;
+ int64 expires = 5; // Unix timestamp (nanoseconds)
+ repeated core.Challenge challenges = 6;
+ reserved 7; // previously combinations
+ reserved 8; // previously v2
+}
+
+message Order {
+ int64 id = 1;
+ int64 registrationID = 2;
+ int64 expires = 3;
+ ProblemDetails error = 4;
+ string certificateSerial = 5;
+ reserved 6; // previously authorizations, deprecated in favor of v2Authorizations
+ string status = 7;
+ repeated string names = 8;
+ bool beganProcessing = 9;
+ int64 created = 10;
+ repeated int64 v2Authorizations = 11;
+}
diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go
new file mode 100644
index 000000000..29f0d9c3d
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/core/util.go
@@ -0,0 +1,298 @@
+package core
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "expvar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/big"
+ mrand "math/rand"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode"
+
+ jose "gopkg.in/square/go-jose.v2"
+)
+
+// Package variables
+
+// BuildID is set by the compiler (using -ldflags "-X core.BuildID $(git rev-parse --short HEAD)")
+// and is used by GetBuildID
+var BuildID string
+
+// BuildHost is set by the compiler and is used by GetBuildHost
+var BuildHost string
+
+// BuildTime is set by the compiler and is used by GetBuildTime
+var BuildTime string
+
+func init() {
+ expvar.NewString("BuildID").Set(BuildID)
+ expvar.NewString("BuildTime").Set(BuildTime)
+}
+
+// Random stuff
+
+type randSource interface {
+ Read(p []byte) (n int, err error)
+}
+
+// RandReader is used so that it can be replaced in tests that require
+// deterministic output
+var RandReader randSource = rand.Reader
+
+// RandomString returns a randomly generated string of the requested length.
+func RandomString(byteLength int) string {
+ b := make([]byte, byteLength)
+ _, err := io.ReadFull(RandReader, b)
+ if err != nil {
+ panic(fmt.Sprintf("Error reading random bytes: %s", err))
+ }
+ return base64.RawURLEncoding.EncodeToString(b)
+}
+
+// NewToken produces a random string for Challenges, etc.
+func NewToken() string {
+ return RandomString(32)
+}
+
+var tokenFormat = regexp.MustCompile(`^[\w-]{43}$`)
+
+// LooksLikeAToken checks whether a string represents a 32-octet value in
+// the URL-safe base64 alphabet.
+func LooksLikeAToken(token string) bool {
+ return tokenFormat.MatchString(token)
+}
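+
+// NewToken output always satisfies this check: 32 random octets encode to
+// exactly 43 unpadded base64url characters (ceil(256/6) = 43), all drawn
+// from [A-Za-z0-9_-]:
+//
+//	tok := NewToken()
+//	// len(tok) == 43 && LooksLikeAToken(tok) == true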
+
+// Fingerprints
+
+// Fingerprint256 produces an unpadded, URL-safe Base64-encoded SHA256 digest
+// of the data.
+func Fingerprint256(data []byte) string {
+ d := sha256.New()
+ _, _ = d.Write(data) // Never returns an error
+ return base64.RawURLEncoding.EncodeToString(d.Sum(nil))
+}
+
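+// Sha256Digest is a raw SHA-256 digest, held as a fixed-size byte array.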
+type Sha256Digest [sha256.Size]byte
+
+// KeyDigest produces a SHA256 digest of a provided public key; the
+// digest is returned raw, not base64-encoded.
+func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) {
+ switch t := key.(type) {
+ case *jose.JSONWebKey:
+ if t == nil {
+ return Sha256Digest{}, fmt.Errorf("Cannot compute digest of nil key")
+ }
+ return KeyDigest(t.Key)
+ case jose.JSONWebKey:
+ return KeyDigest(t.Key)
+ default:
+ keyDER, err := x509.MarshalPKIXPublicKey(key)
+ if err != nil {
+ return Sha256Digest{}, err
+ }
+ return sha256.Sum256(keyDER), nil
+ }
+}
+
+// KeyDigestB64 produces a padded, standard Base64-encoded SHA256 digest of a
+// provided public key.
+func KeyDigestB64(key crypto.PublicKey) (string, error) {
+ digest, err := KeyDigest(key)
+ if err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(digest[:]), nil
+}
+
+// KeyDigestEquals determines whether two public keys have the same digest.
+func KeyDigestEquals(j, k crypto.PublicKey) bool {
+ digestJ, errJ := KeyDigestB64(j)
+ digestK, errK := KeyDigestB64(k)
+ // Keys that don't have a valid digest (due to marshalling problems)
+ // are never equal. So, e.g. nil keys are not equal.
+ if errJ != nil || errK != nil {
+ return false
+ }
+ return digestJ == digestK
+}
+
+// PublicKeysEqual determines whether two public keys have the same marshalled
+// bytes as one another
+func PublicKeysEqual(a, b interface{}) (bool, error) {
+ if a == nil || b == nil {
+ return false, errors.New("One or more nil arguments to PublicKeysEqual")
+ }
+ aBytes, err := x509.MarshalPKIXPublicKey(a)
+ if err != nil {
+ return false, err
+ }
+ bBytes, err := x509.MarshalPKIXPublicKey(b)
+ if err != nil {
+ return false, err
+ }
+ return bytes.Equal(aBytes, bBytes), nil
+}
+
+// SerialToString converts a certificate serial number (big.Int) to a string
+// consistently.
+func SerialToString(serial *big.Int) string {
+ return fmt.Sprintf("%036x", serial)
+}
+
+// StringToSerial converts a string into a certificate serial number (big.Int)
+// consistently.
+func StringToSerial(serial string) (*big.Int, error) {
+ var serialNum big.Int
+ if !ValidSerial(serial) {
+ return &serialNum, errors.New("Invalid serial number")
+ }
+ _, err := fmt.Sscanf(serial, "%036x", &serialNum)
+ return &serialNum, err
+}
+
+// ValidSerial tests whether the input string represents a syntactically
+// valid serial number, i.e., that it is a valid hex string between 32
+// and 36 characters long.
+func ValidSerial(serial string) bool {
+ // Originally, serial numbers were 32 hex characters long. We later increased
+ // them to 36, but we allow the shorter ones because they exist in some
+ // production databases.
+ if len(serial) != 32 && len(serial) != 36 {
+ return false
+ }
+ _, err := hex.DecodeString(serial)
+ return err == nil
+}
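+
+// Round-trip sketch (serial value illustrative):
+//
+//	s := SerialToString(big.NewInt(0x1234)) // 36 hex chars, zero-padded
+//	n, err := StringToSerial(s)             // n.Int64() == 0x1234, err == nil
+//	ok := ValidSerial(s)                    // true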
+
+// GetBuildID identifies what build is running.
+func GetBuildID() (retID string) {
+ retID = BuildID
+ if retID == "" {
+ retID = "Unspecified"
+ }
+ return
+}
+
+// GetBuildTime identifies when this build was made
+func GetBuildTime() (retID string) {
+ retID = BuildTime
+ if retID == "" {
+ retID = "Unspecified"
+ }
+ return
+}
+
+// GetBuildHost identifies the building host
+func GetBuildHost() (retID string) {
+ retID = BuildHost
+ if retID == "" {
+ retID = "Unspecified"
+ }
+ return
+}
+
+// IsAnyNilOrZero returns whether any of the supplied values is nil or, if not
+// nil, is its type's zero value. This is useful for validating that all
+// required fields on a proto message are present.
+func IsAnyNilOrZero(vals ...interface{}) bool {
+ for _, val := range vals {
+ switch v := val.(type) {
+ case nil:
+ return true
+ case []byte:
+ if len(v) == 0 {
+ return true
+ }
+ default:
+ if reflect.ValueOf(v).IsZero() {
+ return true
+ }
+ }
+ }
+ return false
+}
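+
+// For example:
+//
+//	IsAnyNilOrZero(int64(1), "x", []byte{1}) // false
+//	IsAnyNilOrZero(int64(1), "", []byte{1})  // true: "" is string's zero value
+//	IsAnyNilOrZero(nil)                      // true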
+
+// UniqueLowerNames returns the set of all unique names in the input after all
+// of them are lowercased. The returned names will be in their lowercased form
+// and sorted alphabetically.
+func UniqueLowerNames(names []string) (unique []string) {
+ nameMap := make(map[string]int, len(names))
+ for _, name := range names {
+ nameMap[strings.ToLower(name)] = 1
+ }
+
+ unique = make([]string, 0, len(nameMap))
+ for name := range nameMap {
+ unique = append(unique, name)
+ }
+ sort.Strings(unique)
+ return
+}
+
+// LoadCert loads a PEM certificate specified by filename or returns an error
+func LoadCert(filename string) (*x509.Certificate, error) {
+ certPEM, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ block, _ := pem.Decode(certPEM)
+ if block == nil {
+ return nil, fmt.Errorf("No data in cert PEM file %s", filename)
+ }
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ return cert, nil
+}
+
+// retryJitter is used to prevent bunched retried queries from falling into lockstep
+const retryJitter = 0.2
+
+// RetryBackoff calculates a backoff time based on the number of retries. It
+// always adds jitter, so requests that start in unison won't fall into
+// lockstep; because of this, the returned duration can exceed the maximum by
+// up to a factor of retryJitter. Adapted from
+// https://github.com/grpc/grpc-go/blob/v1.11.3/backoff.go#L77-L96
+func RetryBackoff(retries int, base, max time.Duration, factor float64) time.Duration {
+ if retries == 0 {
+ return 0
+ }
+ backoff, fMax := float64(base), float64(max)
+ for backoff < fMax && retries > 1 {
+ backoff *= factor
+ retries--
+ }
+ if backoff > fMax {
+ backoff = fMax
+ }
+ // Randomize backoff delays so that if a cluster of requests start at
+ // the same time, they won't operate in lockstep.
+ backoff *= (1 - retryJitter) + 2*retryJitter*mrand.Float64()
+ return time.Duration(backoff)
+}
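+
+// demoRetryBackoff is an editor's illustrative sketch, not upstream Boulder
+// code; the name and parameter choices are hypothetical. With base=100ms,
+// max=2s, factor=2, delays grow roughly 100ms, 200ms, 400ms, ... capped at
+// 2s, each jittered by up to 20% either way.
+func demoRetryBackoff() {
+ for retries := 1; retries <= 6; retries++ {
+ d := RetryBackoff(retries, 100*time.Millisecond, 2*time.Second, 2)
+ fmt.Printf("retry %d: ~%v\n", retries, d)
+ }
+}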
+
+// IsASCII determines if every character in a string is encoded in
+// the ASCII character set.
+func IsASCII(str string) bool {
+ for _, r := range str {
+ if r > unicode.MaxASCII {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/letsencrypt/boulder/errors/errors.go b/vendor/github.com/letsencrypt/boulder/errors/errors.go
new file mode 100644
index 000000000..3ca9988a6
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/errors/errors.go
@@ -0,0 +1,150 @@
+package errors
+
+import (
+ "fmt"
+
+ "github.com/letsencrypt/boulder/identifier"
+)
+
+// ErrorType provides a coarse category for BoulderErrors.
+// Objects of type ErrorType should never be directly returned by other
+// functions; instead use the methods below to create an appropriate
+// BoulderError wrapping one of these types.
+type ErrorType int
+
+const (
+ InternalServer ErrorType = iota
+ _
+ Malformed
+ Unauthorized
+ NotFound
+ RateLimit
+ RejectedIdentifier
+ InvalidEmail
+ ConnectionFailure
+ _ // Reserved, previously WrongAuthorizationState
+ CAA
+ MissingSCTs
+ Duplicate
+ OrderNotReady
+ DNS
+ BadPublicKey
+ BadCSR
+ AlreadyRevoked
+ BadRevocationReason
+)
+
+func (ErrorType) Error() string {
+ return "urn:ietf:params:acme:error"
+}
+
+// BoulderError represents internal Boulder errors
+type BoulderError struct {
+ Type ErrorType
+ Detail string
+ SubErrors []SubBoulderError
+}
+
+// SubBoulderError represents sub-errors specific to an identifier that are
+// related to a top-level internal Boulder error.
+type SubBoulderError struct {
+ *BoulderError
+ Identifier identifier.ACMEIdentifier
+}
+
+func (be *BoulderError) Error() string {
+ return be.Detail
+}
+
+func (be *BoulderError) Unwrap() error {
+ return be.Type
+}
+
+// WithSubErrors returns a new BoulderError instance created by adding the
+// provided subErrs to the existing BoulderError.
+func (be *BoulderError) WithSubErrors(subErrs []SubBoulderError) *BoulderError {
+ return &BoulderError{
+ Type: be.Type,
+ Detail: be.Detail,
+ SubErrors: append(be.SubErrors, subErrs...),
+ }
+}
+
+// New is a convenience function for creating a new BoulderError
+func New(errType ErrorType, msg string, args ...interface{}) error {
+ return &BoulderError{
+ Type: errType,
+ Detail: fmt.Sprintf(msg, args...),
+ }
+}
+
+func InternalServerError(msg string, args ...interface{}) error {
+ return New(InternalServer, msg, args...)
+}
+
+func MalformedError(msg string, args ...interface{}) error {
+ return New(Malformed, msg, args...)
+}
+
+func UnauthorizedError(msg string, args ...interface{}) error {
+ return New(Unauthorized, msg, args...)
+}
+
+func NotFoundError(msg string, args ...interface{}) error {
+ return New(NotFound, msg, args...)
+}
+
+func RateLimitError(msg string, args ...interface{}) error {
+ return &BoulderError{
+ Type: RateLimit,
+ Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/", args...),
+ }
+}
+
+func RejectedIdentifierError(msg string, args ...interface{}) error {
+ return New(RejectedIdentifier, msg, args...)
+}
+
+func InvalidEmailError(msg string, args ...interface{}) error {
+ return New(InvalidEmail, msg, args...)
+}
+
+func ConnectionFailureError(msg string, args ...interface{}) error {
+ return New(ConnectionFailure, msg, args...)
+}
+
+func CAAError(msg string, args ...interface{}) error {
+ return New(CAA, msg, args...)
+}
+
+func MissingSCTsError(msg string, args ...interface{}) error {
+ return New(MissingSCTs, msg, args...)
+}
+
+func DuplicateError(msg string, args ...interface{}) error {
+ return New(Duplicate, msg, args...)
+}
+
+func OrderNotReadyError(msg string, args ...interface{}) error {
+ return New(OrderNotReady, msg, args...)
+}
+
+func DNSError(msg string, args ...interface{}) error {
+ return New(DNS, msg, args...)
+}
+
+func BadPublicKeyError(msg string, args ...interface{}) error {
+ return New(BadPublicKey, msg, args...)
+}
+
+func BadCSRError(msg string, args ...interface{}) error {
+ return New(BadCSR, msg, args...)
+}
+
+func AlreadyRevokedError(msg string, args ...interface{}) error {
+ return New(AlreadyRevoked, msg, args...)
+}
+
+func BadRevocationReasonError(reason int64) error {
+ return New(BadRevocationReason, "disallowed revocation reason: %d", reason)
+}
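+
+// Editor's note: an illustrative sketch, not upstream Boulder code. Because
+// BoulderError.Unwrap returns its ErrorType, callers in other packages can
+// classify errors with the standard library's errors.Is, e.g.:
+//
+// import (
+// stderrors "errors" // aliased, since this package is itself named errors
+// berrors "github.com/letsencrypt/boulder/errors"
+// )
+//
+// func isNotFound(err error) bool {
+// return stderrors.Is(err, berrors.NotFound)
+// }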
diff --git a/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go b/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go
new file mode 100644
index 000000000..b3b68b705
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go
@@ -0,0 +1,45 @@
+// Code generated by "stringer -type=FeatureFlag"; DO NOT EDIT.
+
+package features
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[unused-0]
+ _ = x[PrecertificateRevocation-1]
+ _ = x[StripDefaultSchemePort-2]
+ _ = x[NonCFSSLSigner-3]
+ _ = x[StoreIssuerInfo-4]
+ _ = x[StreamlineOrderAndAuthzs-5]
+ _ = x[V1DisableNewValidations-6]
+ _ = x[CAAValidationMethods-7]
+ _ = x[CAAAccountURI-8]
+ _ = x[EnforceMultiVA-9]
+ _ = x[MultiVAFullResults-10]
+ _ = x[MandatoryPOSTAsGET-11]
+ _ = x[AllowV1Registration-12]
+ _ = x[StoreRevokerInfo-13]
+ _ = x[RestrictRSAKeySizes-14]
+ _ = x[FasterNewOrdersRateLimit-15]
+ _ = x[ECDSAForAll-16]
+ _ = x[ServeRenewalInfo-17]
+ _ = x[GetAuthzReadOnly-18]
+ _ = x[GetAuthzUseIndex-19]
+ _ = x[CheckFailedAuthorizationsFirst-20]
+ _ = x[AllowReRevocation-21]
+ _ = x[MozRevocationReasons-22]
+}
+
+const _FeatureFlag_name = "unusedPrecertificateRevocationStripDefaultSchemePortNonCFSSLSignerStoreIssuerInfoStreamlineOrderAndAuthzsV1DisableNewValidationsCAAValidationMethodsCAAAccountURIEnforceMultiVAMultiVAFullResultsMandatoryPOSTAsGETAllowV1RegistrationStoreRevokerInfoRestrictRSAKeySizesFasterNewOrdersRateLimitECDSAForAllServeRenewalInfoGetAuthzReadOnlyGetAuthzUseIndexCheckFailedAuthorizationsFirstAllowReRevocationMozRevocationReasons"
+
+var _FeatureFlag_index = [...]uint16{0, 6, 30, 52, 66, 81, 105, 128, 148, 161, 175, 193, 211, 230, 246, 265, 289, 300, 316, 332, 348, 378, 395, 415}
+
+func (i FeatureFlag) String() string {
+ if i < 0 || i >= FeatureFlag(len(_FeatureFlag_index)-1) {
+ return "FeatureFlag(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _FeatureFlag_name[_FeatureFlag_index[i]:_FeatureFlag_index[i+1]]
+}
diff --git a/vendor/github.com/letsencrypt/boulder/features/features.go b/vendor/github.com/letsencrypt/boulder/features/features.go
new file mode 100644
index 000000000..4608d1d63
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/features/features.go
@@ -0,0 +1,158 @@
+//go:generate stringer -type=FeatureFlag
+
+package features
+
+import (
+ "fmt"
+ "sync"
+)
+
+type FeatureFlag int
+
+const (
+ unused FeatureFlag = iota // unused is used for testing
+ // Deprecated features, these can be removed once stripped from production configs
+ PrecertificateRevocation
+ StripDefaultSchemePort
+ NonCFSSLSigner
+ StoreIssuerInfo
+ StreamlineOrderAndAuthzs
+ V1DisableNewValidations
+
+ // Currently in-use features
+ // Check CAA and respect validationmethods parameter.
+ CAAValidationMethods
+ // Check CAA and respect accounturi parameter.
+ CAAAccountURI
+ // EnforceMultiVA causes the VA to block on remote VA PerformValidation
+ // requests in order to make a valid/invalid decision with the results.
+ EnforceMultiVA
+ // MultiVAFullResults will cause the main VA to wait for all of the remote VA
+ // results, not just the threshold required to make a decision.
+ MultiVAFullResults
+ // MandatoryPOSTAsGET forbids legacy unauthenticated GET requests for ACME
+ // resources.
+ MandatoryPOSTAsGET
+ // Allow creation of new registrations in ACMEv1.
+ AllowV1Registration
+ // StoreRevokerInfo enables storage of the revoker and a bool indicating if the row
+ // was checked for extant unrevoked certificates in the blockedKeys table.
+ StoreRevokerInfo
+ // RestrictRSAKeySizes enables restriction of acceptable RSA public key moduli to
+ // the common sizes (2048, 3072, and 4096 bits).
+ RestrictRSAKeySizes
+ // FasterNewOrdersRateLimit enables use of a separate table for counting the
+ // new orders rate limit.
+ FasterNewOrdersRateLimit
+ // ECDSAForAll enables all accounts, regardless of their presence in the CA's
+ // ecdsaAllowedAccounts config value, to get issuance from ECDSA issuers.
+ ECDSAForAll
+ // ServeRenewalInfo exposes the renewalInfo endpoint in the directory and for
+ // GET requests. WARNING: This feature is a draft and highly unstable.
+ ServeRenewalInfo
+ // GetAuthzReadOnly causes the SA to use its read-only database connection
+ // (which is generally pointed at a replica rather than the primary db) when
+ // querying the authz2 table.
+ GetAuthzReadOnly
+ // GetAuthzUseIndex causes the SA to add a USE INDEX hint when it
+ // queries the authz2 table.
+ GetAuthzUseIndex
+ // Check the failed authorization limit before doing authz reuse.
+ CheckFailedAuthorizationsFirst
+ // AllowReRevocation causes the RA to allow the revocation reason of an
+ // already-revoked certificate to be updated to `keyCompromise` from any
+ // other reason, provided that compromise is demonstrated by signing the
+ // second revocation request with the certificate keypair.
+ AllowReRevocation
+ // MozRevocationReasons causes the RA to enforce the following upcoming
+ // Mozilla policies regarding revocation:
+ // - A subscriber can request that their certificate be revoked with reason
+ // keyCompromise, even without demonstrating that compromise at the time.
+ // However, the cert's pubkey will not be added to the blocked keys list.
+ // - When an applicant other than the original subscriber requests that a
+ // certificate be revoked (by demonstrating control over all names in it),
+ // the cert will be revoked with reason cessationOfOperation, regardless of
+ // what revocation reason they request.
+ // - When anyone requests that a certificate be revoked by signing the request
+ // with the certificate's keypair, the cert will be revoked with reason
+ // keyCompromise, regardless of what revocation reason they request.
+ MozRevocationReasons
+)
+
+// List of features and their default value, protected by fMu
+var features = map[FeatureFlag]bool{
+ unused: false,
+ CAAValidationMethods: false,
+ CAAAccountURI: false,
+ EnforceMultiVA: false,
+ MultiVAFullResults: false,
+ MandatoryPOSTAsGET: false,
+ AllowV1Registration: true,
+ V1DisableNewValidations: false,
+ PrecertificateRevocation: false,
+ StripDefaultSchemePort: false,
+ StoreIssuerInfo: false,
+ StoreRevokerInfo: false,
+ RestrictRSAKeySizes: false,
+ FasterNewOrdersRateLimit: false,
+ NonCFSSLSigner: false,
+ ECDSAForAll: false,
+ StreamlineOrderAndAuthzs: false,
+ ServeRenewalInfo: false,
+ GetAuthzReadOnly: false,
+ GetAuthzUseIndex: false,
+ CheckFailedAuthorizationsFirst: false,
+ AllowReRevocation: false,
+ MozRevocationReasons: false,
+}
+
+var fMu = new(sync.RWMutex)
+
+var initial = map[FeatureFlag]bool{}
+
+var nameToFeature = make(map[string]FeatureFlag, len(features))
+
+func init() {
+ for f, v := range features {
+ nameToFeature[f.String()] = f
+ initial[f] = v
+ }
+}
+
+// Set accepts a map of features and whether they should be enabled or
+// disabled. It returns an error if passed a feature name that it doesn't
+// know.
+func Set(featureSet map[string]bool) error {
+ fMu.Lock()
+ defer fMu.Unlock()
+ for n, v := range featureSet {
+ f, present := nameToFeature[n]
+ if !present {
+ return fmt.Errorf("feature '%s' doesn't exist", n)
+ }
+ features[f] = v
+ }
+ return nil
+}
+
+// Enabled returns true if the feature is enabled and false if it isn't. It
+// panics if passed a feature that it doesn't know.
+func Enabled(n FeatureFlag) bool {
+ fMu.RLock()
+ defer fMu.RUnlock()
+ v, present := features[n]
+ if !present {
+ panic(fmt.Sprintf("feature '%s' doesn't exist", n.String()))
+ }
+ return v
+}
+
+// Reset resets the features to their initial state
+func Reset() {
+ fMu.Lock()
+ defer fMu.Unlock()
+ for k, v := range initial {
+ features[k] = v
+ }
+}
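+
+// Editor's note: an illustrative usage sketch, not upstream Boulder code.
+// Typical usage loads a map[string]bool from configuration, applies it with
+// Set, and branches on Enabled at call sites; Reset restores the defaults
+// (handy in tests):
+//
+// if err := features.Set(map[string]bool{"ECDSAForAll": true}); err != nil {
+// return err
+// }
+// if features.Enabled(features.ECDSAForAll) {
+// // take the ECDSA issuance path
+// }
+// defer features.Reset()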
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go
new file mode 100644
index 000000000..3457f5b12
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go
@@ -0,0 +1,98 @@
+package goodkey
+
+import (
+ "crypto"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "io/ioutil"
+
+ "github.com/letsencrypt/boulder/core"
+
+ yaml "gopkg.in/yaml.v2"
+)
+
+// blockedKeys is a type for maintaining a map of SHA256 hashes
+// of SubjectPublicKeyInfo's that should be considered blocked.
+// blockedKeys are created by using loadBlockedKeysList.
+type blockedKeys map[core.Sha256Digest]bool
+
+var ErrWrongDecodedSize = errors.New("not enough bytes decoded for sha256 hash")
+
+// blocked checks if the given public key is considered administratively
+// blocked based on a SHA256 hash of the SubjectPublicKeyInfo.
+// Important: blocked should only be called on a blockedKeys instance
+// returned from loadBlockedKeysList.
+func (b blockedKeys) blocked(key crypto.PublicKey) (bool, error) {
+ hash, err := core.KeyDigest(key)
+ if err != nil {
+ // The bool result should be ignored when err != nil. To be on the
+ // paranoid side, return true anyway, so that a key whose digest we
+ // can't compute is always treated as blocked even if a caller
+ // foolishly discards the err result.
+ return true, err
+ }
+ return b[hash], nil
+}
+
+// loadBlockedKeysList creates a blockedKeys object that can be used to check if
+// a key is blocked. It creates a lookup map from base64-encoded (under the
+// "blocked" key) and hex-encoded (under the "blockedHashesHex" key) SHA256
+// hashes of SubjectPublicKeyInfo's in the input YAML file, with the expected
+// format:
+//
+// ```
+// blocked:
+// - cuwGhNNI6nfob5aqY90e7BleU6l7rfxku4X3UTJ3Z7M=
+// <snipped>
+// - Qebc1V3SkX3izkYRGNJilm9Bcuvf0oox4U2Rn+b4JOE=
+// ```
+//
+// If no hashes are found in the input YAML an error is returned.
+func loadBlockedKeysList(filename string) (*blockedKeys, error) {
+ yamlBytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var list struct {
+ BlockedHashes []string `yaml:"blocked"`
+ BlockedHashesHex []string `yaml:"blockedHashesHex"`
+ }
+ err = yaml.Unmarshal(yamlBytes, &list)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(list.BlockedHashes) == 0 && len(list.BlockedHashesHex) == 0 {
+ return nil, errors.New("no blocked hashes in YAML")
+ }
+
+ blockedKeys := make(blockedKeys, len(list.BlockedHashes)+len(list.BlockedHashesHex))
+ for _, b64Hash := range list.BlockedHashes {
+ decoded, err := base64.StdEncoding.DecodeString(b64Hash)
+ if err != nil {
+ return nil, err
+ }
+ if len(decoded) != sha256.Size {
+ return nil, ErrWrongDecodedSize
+ }
+ var sha256Digest core.Sha256Digest
+ copy(sha256Digest[:], decoded[0:sha256.Size])
+ blockedKeys[sha256Digest] = true
+ }
+ for _, hexHash := range list.BlockedHashesHex {
+ decoded, err := hex.DecodeString(hexHash)
+ if err != nil {
+ return nil, err
+ }
+ if len(decoded) != sha256.Size {
+ return nil, ErrWrongDecodedSize
+ }
+ var sha256Digest core.Sha256Digest
+ copy(sha256Digest[:], decoded[0:sha256.Size])
+ blockedKeys[sha256Digest] = true
+ }
+ return &blockedKeys, nil
+}
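+
+// Editor's note: an illustrative in-package sketch, not upstream Boulder
+// code; the file path and the pub variable are hypothetical:
+//
+// keys, err := loadBlockedKeysList("blocked-keys.yaml")
+// if err != nil {
+// return err
+// }
+// isBlocked, err := keys.blocked(pub) // pub is a crypto.PublicKey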
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
new file mode 100644
index 000000000..b751c376c
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
@@ -0,0 +1,432 @@
+package goodkey
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "math/big"
+ "sync"
+
+ "github.com/letsencrypt/boulder/core"
+ berrors "github.com/letsencrypt/boulder/errors"
+ "github.com/letsencrypt/boulder/features"
+ sapb "github.com/letsencrypt/boulder/sa/proto"
+ "google.golang.org/grpc"
+
+ "github.com/titanous/rocacheck"
+)
+
+// To generate, run: primes 2 752 | tr '\n' ,
+var smallPrimeInts = []int64{
+ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
+ 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107,
+ 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167,
+ 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
+ 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
+ 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
+ 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,
+ 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491,
+ 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
+ 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641,
+ 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709,
+ 719, 727, 733, 739, 743, 751,
+}
+
+// smallPrimesProduct is computed lazily, exactly once, via smallPrimesSingleton.
+var (
+ smallPrimesSingleton sync.Once
+ smallPrimesProduct *big.Int
+)
+
+type Config struct {
+ // WeakKeyFile is the path to a JSON file containing truncated modulus hashes
+ // of known weak RSA keys. If this config value is empty, then RSA modulus
+ // hash checking will be disabled.
+ WeakKeyFile string
+ // BlockedKeyFile is the path to a YAML file containing base64-encoded SHA256
+ // hashes of PKIX Subject Public Keys that should be blocked. If this config
+ // value is empty, then blocked key checking will be disabled.
+ BlockedKeyFile string
+ // FermatRounds is an integer number of rounds of Fermat's factorization
+ // method that should be performed to attempt to detect keys whose modulus can
+ // be trivially factored because the two factors are very close to each other.
+ // If this config value is empty (0), no factorization will be attempted.
+ FermatRounds int
+}
+
+// ErrBadKey represents an error with a key. It is distinct from the various
+// ways in which an ACME request can have an erroneous key (BadPublicKeyError,
+// BadCSRError) because this library is used to check both JWS signing keys and
+// keys in CSRs.
+var ErrBadKey = errors.New("")
+
+func badKey(msg string, args ...interface{}) error {
+ return fmt.Errorf("%w%s", ErrBadKey, fmt.Errorf(msg, args...))
+}
+
+// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey method to KeyPolicy,
+// rather than storing a full sa.SQLStorageAuthority. This makes testing
+// significantly simpler.
+type BlockedKeyCheckFunc func(context.Context, *sapb.KeyBlockedRequest, ...grpc.CallOption) (*sapb.Exists, error)
+
+// KeyPolicy determines which types of key may be used with various boulder
+// operations.
+type KeyPolicy struct {
+ AllowRSA bool // Whether RSA keys should be allowed.
+ AllowECDSANISTP256 bool // Whether ECDSA NISTP256 keys should be allowed.
+ AllowECDSANISTP384 bool // Whether ECDSA NISTP384 keys should be allowed.
+ weakRSAList *WeakRSAKeys
+ blockedList *blockedKeys
+ fermatRounds int
+ dbCheck BlockedKeyCheckFunc
+}
+
+// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384.
+// weakKeyFile contains the path to a JSON file containing truncated modulus
+// hashes of known weak RSA keys. If this argument is empty RSA modulus hash
+// checking will be disabled. blockedKeyFile contains the path to a YAML file
+// containing Base64 encoded SHA256 hashes of pkix subject public keys that
+// should be blocked. If this argument is empty then no blocked key checking is
+// performed.
+func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
+ kp := KeyPolicy{
+ AllowRSA: true,
+ AllowECDSANISTP256: true,
+ AllowECDSANISTP384: true,
+ dbCheck: bkc,
+ }
+ if config.WeakKeyFile != "" {
+ keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile)
+ if err != nil {
+ return KeyPolicy{}, err
+ }
+ kp.weakRSAList = keyList
+ }
+ if config.BlockedKeyFile != "" {
+ blocked, err := loadBlockedKeysList(config.BlockedKeyFile)
+ if err != nil {
+ return KeyPolicy{}, err
+ }
+ kp.blockedList = blocked
+ }
+ if config.FermatRounds < 0 {
+ return KeyPolicy{}, fmt.Errorf("Fermat factorization rounds cannot be negative: %d", config.FermatRounds)
+ }
+ kp.fermatRounds = config.FermatRounds
+ return kp, nil
+}
+
+// GoodKey returns nil if the key is acceptable for both TLS use and account
+// key use (our requirements are the same for either one), according to basic
+// strength and algorithm checking. GoodKey only supports pointers: *rsa.PublicKey
+// and *ecdsa.PublicKey. It will reject non-pointer types.
+// TODO: Support JSONWebKeys once go-jose migration is done.
+func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) error {
+ // Early rejection of unacceptable key types to guard subsequent checks.
+ switch t := key.(type) {
+ case *rsa.PublicKey, *ecdsa.PublicKey:
+ break
+ default:
+ return badKey("unsupported key type %T", t)
+ }
+ // If there is a blocked list configured then check if the public key is one
+ // that has been administratively blocked.
+ if policy.blockedList != nil {
+ if blocked, err := policy.blockedList.blocked(key); err != nil {
+ return berrors.InternalServerError("error checking blocklist for key: %v", key)
+ } else if blocked {
+ return badKey("public key is forbidden")
+ }
+ }
+ if policy.dbCheck != nil {
+ digest, err := core.KeyDigest(key)
+ if err != nil {
+ return badKey("%w", err)
+ }
+ exists, err := policy.dbCheck(ctx, &sapb.KeyBlockedRequest{KeyHash: digest[:]})
+ if err != nil {
+ return err
+ } else if exists.Exists {
+ return badKey("public key is forbidden")
+ }
+ }
+ switch t := key.(type) {
+ case *rsa.PublicKey:
+ return policy.goodKeyRSA(t)
+ case *ecdsa.PublicKey:
+ return policy.goodKeyECDSA(t)
+ default:
+ return badKey("unsupported key type %T", key)
+ }
+}
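+
+// Editor's note: an illustrative usage sketch, not upstream Boulder code.
+// It builds a KeyPolicy with no weak/blocked key files and no database
+// check (nil BlockedKeyCheckFunc), then vets a fresh P-256 key; crypto/rand
+// would additionally need to be imported:
+//
+// kp, err := NewKeyPolicy(&Config{}, nil)
+// if err != nil {
+// return err
+// }
+// priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+// err = kp.GoodKey(context.Background(), priv.Public())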
+
+// goodKeyECDSA determines if an ECDSA pubkey meets our requirements.
+func (policy *KeyPolicy) goodKeyECDSA(key *ecdsa.PublicKey) (err error) {
+ // Check the curve.
+ //
+ // The validity of the curve is an assumption for all following tests.
+ err = policy.goodCurve(key.Curve)
+ if err != nil {
+ return err
+ }
+
+ // Key validation routine adapted from NIST SP800-56A § 5.6.2.3.2.
+ // <http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Ar2.pdf>
+ //
+ // Assuming a prime field since a) we are only allowing such curves and b)
+ // crypto/elliptic only supports prime curves. Where this assumption
+ // simplifies the code below, it is explicitly stated and explained. If ever
+ // adapting this code to support non-prime curves, refer to NIST SP800-56A §
+ // 5.6.2.3.2 and adapt this code appropriately.
+ params := key.Params()
+
+ // SP800-56A § 5.6.2.3.2 Step 1.
+ // Partial check of the public key for an invalid range in the EC group:
+ // Verify that key is not the point at infinity O.
+ // This code assumes that the point at infinity is (0,0), which is the
+ // case for all supported curves.
+ if isPointAtInfinityNISTP(key.X, key.Y) {
+ return badKey("key x, y must not be the point at infinity")
+ }
+
+ // SP800-56A § 5.6.2.3.2 Step 2.
+ // "Verify that x_Q and y_Q are integers in the interval [0,p-1] in the
+ // case that q is an odd prime p, or that x_Q and y_Q are bit strings
+ // of length m bits in the case that q = 2**m."
+ //
+ // Prove prime field: ASSUMED.
+ // Prove q != 2: ASSUMED. (Curve parameter. No supported curve has q == 2.)
+ // Prime field && q != 2 => q is an odd prime p
+ // Therefore "verify that x, y are in [0, p-1]" satisfies step 2.
+ //
+ // Therefore verify that both x and y of the public key point have the unique
+ // correct representation of an element in the underlying field by verifying
+ // that x and y are integers in [0, p-1].
+ if key.X.Sign() < 0 || key.Y.Sign() < 0 {
+ return badKey("key x, y must not be negative")
+ }
+
+ if key.X.Cmp(params.P) >= 0 || key.Y.Cmp(params.P) >= 0 {
+ return badKey("key x, y must not exceed P-1")
+ }
+
+ // SP800-56A § 5.6.2.3.2 Step 3.
+ // "If q is an odd prime p, verify that (y_Q)**2 === (x_Q)***3 + a*x_Q + b (mod p).
+ // If q = 2**m, verify that (y_Q)**2 + (x_Q)*(y_Q) == (x_Q)**3 + a*(x_Q)*2 + b in
+ // the finite field of size 2**m.
+ // (Ensures that the public key is on the correct elliptic curve.)"
+ //
+ // q is an odd prime p: proven/assumed above.
+ // a = -3 for all supported curves.
+ //
+ // Therefore step 3 is satisfied simply by showing that
+ // y**2 === x**3 - 3*x + B (mod P).
+ //
+ // This proves that the public key is on the correct elliptic curve.
+ // But in practice, this test is provided by crypto/elliptic, so use that.
+ if !key.Curve.IsOnCurve(key.X, key.Y) {
+ return badKey("key point is not on the curve")
+ }
+
+ // SP800-56A § 5.6.2.3.2 Step 4.
+ // "Verify that n*Q == Ø.
+ // (Ensures that the public key has the correct order. Along with check 1,
+ // ensures that the public key is in the correct range in the correct EC
+ // subgroup, that is, it is in the correct EC subgroup and is not the
+ // identity element.)"
+ //
+ // Ensure that public key has the correct order:
+ // verify that n*Q = Ø.
+ //
+ // n*Q = Ø iff n*Q is the point at infinity (see step 1).
+ ox, oy := key.Curve.ScalarMult(key.X, key.Y, params.N.Bytes())
+ if !isPointAtInfinityNISTP(ox, oy) {
+ return badKey("public key does not have correct order")
+ }
+
+ // End of SP800-56A § 5.6.2.3.2 Public Key Validation Routine.
+ // Key is valid.
+ return nil
+}
+
+// Returns true iff the point (x,y) on NIST P-256, NIST P-384 or NIST P-521 is
+// the point at infinity. These curves all have the same point at infinity
+// (0,0). This function must ONLY be used on points on curves verified to have
+// (0,0) as their point at infinity.
+func isPointAtInfinityNISTP(x, y *big.Int) bool {
+ return x.Sign() == 0 && y.Sign() == 0
+}
+
+// goodCurve determines if an elliptic curve meets our requirements.
+func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) {
+ // Simply use a whitelist for now.
+ params := c.Params()
+ switch {
+ case policy.AllowECDSANISTP256 && params == elliptic.P256().Params():
+ return nil
+ case policy.AllowECDSANISTP384 && params == elliptic.P384().Params():
+ return nil
+ default:
+ return badKey("ECDSA curve %v not allowed", params.Name)
+ }
+}
+
+var acceptableRSAKeySizes = map[int]bool{
+ 2048: true,
+ 3072: true,
+ 4096: true,
+}
+
+// goodKeyRSA determines if an RSA pubkey meets our requirements.
+func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) {
+ if !policy.AllowRSA {
+ return badKey("RSA keys are not allowed")
+ }
+ if policy.weakRSAList != nil && policy.weakRSAList.Known(key) {
+ return badKey("key is on a known weak RSA key list")
+ }
+
+ // Baseline Requirements Appendix A
+ // Modulus must be >= 2048 bits and <= 4096 bits
+ modulus := key.N
+ modulusBitLen := modulus.BitLen()
+ if features.Enabled(features.RestrictRSAKeySizes) {
+ if !acceptableRSAKeySizes[modulusBitLen] {
+ return badKey("key size not supported: %d", modulusBitLen)
+ }
+ } else {
+ const maxKeySize = 4096
+ if modulusBitLen < 2048 {
+ return badKey("key too small: %d", modulusBitLen)
+ }
+ if modulusBitLen > maxKeySize {
+ return badKey("key too large: %d > %d", modulusBitLen, maxKeySize)
+ }
+ // Bit lengths that are not a multiple of 8 may cause problems on some
+ // client implementations.
+ if modulusBitLen%8 != 0 {
+ return badKey("key length wasn't a multiple of 8: %d", modulusBitLen)
+ }
+ }
+
+ // Rather than support arbitrary exponents, which significantly increases
+ // the size of the key space we allow, we restrict E to the defacto standard
+ // RSA exponent 65537. There is no specific standards document that specifies
+ // 65537 as the 'best' exponent, but ITU X.509 Annex C suggests there are
+ // notable merits for using it if using a fixed exponent.
+ //
+ // The CABF Baseline Requirements state:
+ // The CA SHALL confirm that the value of the public exponent is an
+ // odd number equal to 3 or more. Additionally, the public exponent
+ // SHOULD be in the range between 2^16 + 1 and 2^256-1.
+ //
+ // By only allowing one exponent, which fits these constraints, we satisfy
+ // these requirements.
+ if key.E != 65537 {
+ return badKey("key exponent must be 65537")
+ }
+
+ // The modulus SHOULD also have the following characteristics: an odd
+ // number, not the power of a prime, and have no factors smaller than 752.
+ // TODO: We don't yet check for "power of a prime."
+ if checkSmallPrimes(modulus) {
+ return badKey("key divisible by small prime")
+ }
+ // Check for weak keys generated by Infineon hardware
+ // (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17)
+ if rocacheck.IsWeak(key) {
+ return badKey("key generated by vulnerable Infineon-based hardware")
+ }
+ // Check if the key can be easily factored via Fermat's factorization method.
+ if policy.fermatRounds > 0 {
+ err := checkPrimeFactorsTooClose(modulus, policy.fermatRounds)
+ if err != nil {
+ return badKey("key generated with factors too close together: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// Returns true iff integer i is divisible by any of the primes in smallPrimes.
+//
+// Short circuits; execution time is dependent on i. Do not use this on secret
+// values.
+//
+// Rather than checking each prime individually (invoking Mod on each),
+// multiply the primes together and let GCD do our work for us: if the
+// GCD between <key> and <product of primes> is not one, we know we have
+// a bad key. This is substantially faster than checking each prime
+// individually.
+func checkSmallPrimes(i *big.Int) bool {
+ smallPrimesSingleton.Do(func() {
+ smallPrimesProduct = big.NewInt(1)
+ for _, prime := range smallPrimeInts {
+ smallPrimesProduct.Mul(smallPrimesProduct, big.NewInt(prime))
+ }
+ })
+
+ // When the GCD is 1, i and smallPrimesProduct are coprime, meaning they
+ // share no common factors. When the GCD is not one, it is the product of
+ // all common factors, meaning we've identified at least one small prime
+ // which invalidates i as a valid key.
+
+ var result big.Int
+ result.GCD(nil, nil, i, smallPrimesProduct)
+ return result.Cmp(big.NewInt(1)) != 0
+}
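+
+// demoCheckSmallPrimes is an editor's illustrative sketch, not upstream
+// Boulder code; the name is hypothetical. 35 = 5*7 shares factors with the
+// small-primes product, while the prime 1009 exceeds every listed prime:
+func demoCheckSmallPrimes() {
+ fmt.Println(checkSmallPrimes(big.NewInt(35)))   // true: divisible by 5 and 7
+ fmt.Println(checkSmallPrimes(big.NewInt(1009))) // false: no factor <= 751
+}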
+
+// Returns an error if the modulus n is able to be factored into primes p and q
+// via Fermat's factorization method. This method relies on the two primes being
+// very close together, which means that they were almost certainly not picked
+// independently from a uniform random distribution. Basically, if we can factor
+// the key this easily, so can anyone else.
+func checkPrimeFactorsTooClose(n *big.Int, rounds int) error {
+ // Pre-allocate some big numbers that we'll use a lot down below.
+ one := big.NewInt(1)
+ bb := new(big.Int)
+
+ // Any odd integer is equal to a difference of squares of integers:
+ // n = a^2 - b^2 = (a + b)(a - b)
+ // Any RSA public key modulus is equal to a product of two primes:
+ // n = pq
+ // Here we try to find values for a and b, since doing so also gives us the
+ // prime factors p = (a + b) and q = (a - b).
+
+ // We start with a close to the square root of the modulus n, to start with
+ // two candidate prime factors that are as close together as possible and
+ // work our way out from there. Specifically, we set a = ceil(sqrt(n)), the
+ // first integer greater than the square root of n. Unfortunately, big.Int's
+ // built-in square root function takes the floor, so we have to add one to get
+ // the ceil.
+ a := new(big.Int)
+ a.Sqrt(n).Add(a, one)
+
+ // We calculate b2 to see if it is a perfect square (i.e. b^2), and therefore
+ // b is an integer. Specifically, b2 = a^2 - n.
+ b2 := new(big.Int)
+ b2.Mul(a, a).Sub(b2, n)
+
+ for i := 0; i < rounds; i++ {
+ // To see if b2 is a perfect square, we take its square root, square that,
+ // and check to see if we got the same result back.
+ bb.Sqrt(b2).Mul(bb, bb)
+ if b2.Cmp(bb) == 0 {
+ // b2 is a perfect square, so we've found integer values of a and b,
+ // and can easily compute p and q as their sum and difference.
+ bb.Sqrt(bb)
+ p := new(big.Int).Add(a, bb)
+ q := new(big.Int).Sub(a, bb)
+ return fmt.Errorf("public modulus n = pq factored into p: %s; q: %s", p, q)
+ }
+
+ // Set up the next iteration by incrementing a by one and recalculating b2.
+ a.Add(a, one)
+ b2.Mul(a, a).Sub(b2, n)
+ }
+ return nil
+}
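+
+// demoFermat is an editor's illustrative sketch, not upstream Boulder code;
+// the name is hypothetical. For n = 11*13 = 143, a starts at
+// ceil(sqrt(143)) = 12 and b2 = 144-143 = 1 is already a perfect square, so
+// a single round factors n into p = 13 and q = 11:
+func demoFermat() {
+ err := checkPrimeFactorsTooClose(big.NewInt(143), 1)
+ fmt.Println(err) // non-nil: "... factored into p: 13; q: 11"
+}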
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/weak.go b/vendor/github.com/letsencrypt/boulder/goodkey/weak.go
new file mode 100644
index 000000000..4a63af09a
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/goodkey/weak.go
@@ -0,0 +1,66 @@
+package goodkey
+
+// This file defines a basic method for testing if a given RSA public key is on one of
+// the Debian weak key lists and is therefore considered compromised. Instead of
+// directly loading the hash suffixes from the individual lists we flatten them all
+// into a single JSON list using cmd/weak-key-flatten for ease of use.
+
+import (
+ "crypto/rsa"
+ "crypto/sha1"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+)
+
+type truncatedHash [10]byte
+
+type WeakRSAKeys struct {
+ suffixes map[truncatedHash]struct{}
+}
+
+func LoadWeakRSASuffixes(path string) (*WeakRSAKeys, error) {
+ f, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var suffixList []string
+ err = json.Unmarshal(f, &suffixList)
+ if err != nil {
+ return nil, err
+ }
+
+ wk := &WeakRSAKeys{suffixes: make(map[truncatedHash]struct{})}
+ for _, suffix := range suffixList {
+ err := wk.addSuffix(suffix)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return wk, nil
+}
+
+func (wk *WeakRSAKeys) addSuffix(str string) error {
+ var suffix truncatedHash
+ decoded, err := hex.DecodeString(str)
+ if err != nil {
+ return err
+ }
+ if len(decoded) != 10 {
+ return fmt.Errorf("unexpected suffix length of %d", len(decoded))
+ }
+ copy(suffix[:], decoded)
+ wk.suffixes[suffix] = struct{}{}
+ return nil
+}
+
+func (wk *WeakRSAKeys) Known(key *rsa.PublicKey) bool {
+ // Hash input is in the format "Modulus={upper-case hex of modulus}\n"
+ hash := sha1.Sum([]byte(fmt.Sprintf("Modulus=%X\n", key.N.Bytes())))
+ var suffix truncatedHash
+ copy(suffix[:], hash[10:])
+ _, present := wk.suffixes[suffix]
+ return present
+}
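+
+// Editor's note: an illustrative sketch, not upstream Boulder code. The
+// stored suffix is the trailing 10 bytes of SHA1("Modulus=<UPPER-HEX>\n"),
+// matching the format of the Debian weak-key lists:
+//
+// hash := sha1.Sum([]byte(fmt.Sprintf("Modulus=%X\n", key.N.Bytes())))
+// suffix := hash[10:] // keep the last 10 of the 20 hash bytes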
diff --git a/vendor/github.com/letsencrypt/boulder/identifier/identifier.go b/vendor/github.com/letsencrypt/boulder/identifier/identifier.go
new file mode 100644
index 000000000..cbf228f86
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/identifier/identifier.go
@@ -0,0 +1,32 @@
+// Package identifier defines types for RFC 8555 ACME identifiers.
+package identifier
+
+// IdentifierType is a named string type for registered ACME identifier types.
+// See https://tools.ietf.org/html/rfc8555#section-9.7.7
+type IdentifierType string
+
+const (
+ // DNS is specified in RFC 8555 for DNS type identifiers.
+ DNS = IdentifierType("dns")
+)
+
+// ACMEIdentifier is a struct encoding an identifier that can be validated. The
+// protocol allows for different types of identifier to be supported (DNS
+// names, IP addresses, etc.), but currently we only support RFC 8555 DNS type
+// identifiers for domain names.
+type ACMEIdentifier struct {
+ // Type is the registered IdentifierType of the identifier.
+ Type IdentifierType `json:"type"`
+ // Value is the value of the identifier. For a DNS type identifier it is
+ // a domain name.
+ Value string `json:"value"`
+}
+
+// DNSIdentifier is a convenience function for creating an ACMEIdentifier with
+// Type DNS for a given domain name.
+func DNSIdentifier(domain string) ACMEIdentifier {
+ return ACMEIdentifier{
+ Type: DNS,
+ Value: domain,
+ }
+}
diff --git a/vendor/github.com/letsencrypt/boulder/probs/probs.go b/vendor/github.com/letsencrypt/boulder/probs/probs.go
new file mode 100644
index 000000000..3736e8d39
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/probs/probs.go
@@ -0,0 +1,349 @@
+package probs
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/letsencrypt/boulder/identifier"
+)
+
+// Error types that can be used in ACME payloads
+const (
+ ConnectionProblem = ProblemType("connection")
+ MalformedProblem = ProblemType("malformed")
+ ServerInternalProblem = ProblemType("serverInternal")
+ TLSProblem = ProblemType("tls")
+ UnauthorizedProblem = ProblemType("unauthorized")
+ RateLimitedProblem = ProblemType("rateLimited")
+ BadNonceProblem = ProblemType("badNonce")
+ InvalidEmailProblem = ProblemType("invalidEmail")
+ RejectedIdentifierProblem = ProblemType("rejectedIdentifier")
+ AccountDoesNotExistProblem = ProblemType("accountDoesNotExist")
+ CAAProblem = ProblemType("caa")
+ DNSProblem = ProblemType("dns")
+ AlreadyRevokedProblem = ProblemType("alreadyRevoked")
+ OrderNotReadyProblem = ProblemType("orderNotReady")
+ BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm")
+ BadPublicKeyProblem = ProblemType("badPublicKey")
+ BadRevocationReasonProblem = ProblemType("badRevocationReason")
+ BadCSRProblem = ProblemType("badCSR")
+
+ V1ErrorNS = "urn:acme:error:"
+ V2ErrorNS = "urn:ietf:params:acme:error:"
+)
+
+// ProblemType defines the error types in the ACME protocol
+type ProblemType string
+
+// ProblemDetails objects represent problem documents
+// https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00
+type ProblemDetails struct {
+ Type ProblemType `json:"type,omitempty"`
+ Detail string `json:"detail,omitempty"`
+ // HTTPStatus is the HTTP status code the ProblemDetails should probably be sent
+ // as.
+ HTTPStatus int `json:"status,omitempty"`
+ // SubProblems are optional additional per-identifier problems. See
+ // RFC 8555 Section 6.7.1: https://tools.ietf.org/html/rfc8555#section-6.7.1
+ SubProblems []SubProblemDetails `json:"subproblems,omitempty"`
+}
+
+// SubProblemDetails represents sub-problems specific to an identifier that are
+// related to a top-level ProblemDetails.
+// See RFC 8555 Section 6.7.1: https://tools.ietf.org/html/rfc8555#section-6.7.1
+type SubProblemDetails struct {
+ ProblemDetails
+ Identifier identifier.ACMEIdentifier `json:"identifier"`
+}
+
+func (pd *ProblemDetails) Error() string {
+ return fmt.Sprintf("%s :: %s", pd.Type, pd.Detail)
+}
+
+// WithSubProblems returns a new ProblemDetails instance created by adding the
+// provided subProbs to the existing ProblemDetails.
+func (pd *ProblemDetails) WithSubProblems(subProbs []SubProblemDetails) *ProblemDetails {
+ return &ProblemDetails{
+ Type: pd.Type,
+ Detail: pd.Detail,
+ HTTPStatus: pd.HTTPStatus,
+ SubProblems: append(pd.SubProblems, subProbs...),
+ }
+}
+
+// statusTooManyRequests is the HTTP status code meant for rate limiting
+// errors. It predates the addition of http.StatusTooManyRequests to
+// net/http, so it is defined here.
+const statusTooManyRequests = 429
+
+// ProblemDetailsToStatusCode inspects the given ProblemDetails to figure out
+// what HTTP status code it should represent. It should only be used by the WFE
+// but is included in this package because of its reliance on ProblemTypes.
+func ProblemDetailsToStatusCode(prob *ProblemDetails) int {
+ if prob.HTTPStatus != 0 {
+ return prob.HTTPStatus
+ }
+ switch prob.Type {
+ case
+ ConnectionProblem,
+ MalformedProblem,
+ BadSignatureAlgorithmProblem,
+ BadPublicKeyProblem,
+ TLSProblem,
+ BadNonceProblem,
+ InvalidEmailProblem,
+ RejectedIdentifierProblem,
+ AccountDoesNotExistProblem,
+ BadRevocationReasonProblem:
+ return http.StatusBadRequest
+ case ServerInternalProblem:
+ return http.StatusInternalServerError
+ case
+ UnauthorizedProblem,
+ CAAProblem:
+ return http.StatusForbidden
+ case RateLimitedProblem:
+ return statusTooManyRequests
+ default:
+ return http.StatusInternalServerError
+ }
+}
+
+// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad
+// Request status code.
+func BadNonce(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: BadNonceProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad
+// Request status code.
+func RejectedIdentifier(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: RejectedIdentifierProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict
+// status code.
+func Conflict(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusConflict,
+ }
+}
+
+// AlreadyRevoked returns a ProblemDetails with an AlreadyRevokedProblem and a 400 Bad
+// Request status code.
+func AlreadyRevoked(detail string, a ...interface{}) *ProblemDetails {
+ return &ProblemDetails{
+ Type: AlreadyRevokedProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad
+// Request status code.
+func Malformed(detail string, args ...interface{}) *ProblemDetails {
+ if len(args) > 0 {
+ detail = fmt.Sprintf(detail, args...)
+ }
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request
+// Timeout status code.
+func Canceled(detail string, args ...interface{}) *ProblemDetails {
+ if len(args) > 0 {
+ detail = fmt.Sprintf(detail, args...)
+ }
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusRequestTimeout,
+ }
+}
+
+// BadSignatureAlgorithm returns a ProblemDetails with a BadSignatureAlgorithmProblem
+// and a 400 Bad Request status code.
+func BadSignatureAlgorithm(detail string, a ...interface{}) *ProblemDetails {
+ return &ProblemDetails{
+ Type: BadSignatureAlgorithmProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad
+// Request status code.
+func BadPublicKey(detail string, a ...interface{}) *ProblemDetails {
+ return &ProblemDetails{
+ Type: BadPublicKeyProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found
+// status code.
+func NotFound(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusNotFound,
+ }
+}
+
+// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a
+// 500 Internal Server Error status code.
+func ServerInternal(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: ServerInternalProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusInternalServerError,
+ }
+}
+
+// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403
+// Forbidden status code.
+func Unauthorized(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: UnauthorizedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusForbidden,
+ }
+}
+
+// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP
+// method error.
+func MethodNotAllowed() *ProblemDetails {
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: "Method not allowed",
+ HTTPStatus: http.StatusMethodNotAllowed,
+ }
+}
+
+// ContentLengthRequired returns a ProblemDetails representing a missing
+// Content-Length header error
+func ContentLengthRequired() *ProblemDetails {
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: "missing Content-Length header",
+ HTTPStatus: http.StatusLengthRequired,
+ }
+}
+
+// InvalidContentType returns a ProblemDetails suitable for a missing or
+// incorrect Content-Type header.
+func InvalidContentType(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusUnsupportedMediaType,
+ }
+}
+
+// InvalidEmail returns a ProblemDetails representing an invalid email address
+// error
+func InvalidEmail(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: InvalidEmailProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// ConnectionFailure returns a ProblemDetails representing a ConnectionProblem
+// error
+func ConnectionFailure(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: ConnectionProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// RateLimited returns a ProblemDetails representing a RateLimitedProblem error
+func RateLimited(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: RateLimitedProblem,
+ Detail: detail,
+ HTTPStatus: statusTooManyRequests,
+ }
+}
+
+// TLSError returns a ProblemDetails representing a TLSProblem error
+func TLSError(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: TLSProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// AccountDoesNotExist returns a ProblemDetails representing an
+// AccountDoesNotExistProblem error
+func AccountDoesNotExist(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: AccountDoesNotExistProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// CAA returns a ProblemDetails representing a CAAProblem
+func CAA(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: CAAProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusForbidden,
+ }
+}
+
+// DNS returns a ProblemDetails representing a DNSProblem
+func DNS(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: DNSProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// OrderNotReady returns a ProblemDetails representing an OrderNotReadyProblem
+func OrderNotReady(detail string, a ...interface{}) *ProblemDetails {
+ return &ProblemDetails{
+ Type: OrderNotReadyProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusForbidden,
+ }
+}
+
+// BadRevocationReason returns a ProblemDetails representing
+// a BadRevocationReasonProblem
+func BadRevocationReason(detail string, a ...interface{}) *ProblemDetails {
+ return &ProblemDetails{
+ Type: BadRevocationReasonProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// BadCSR returns a ProblemDetails representing a BadCSRProblem.
+func BadCSR(detail string, a ...interface{}) *ProblemDetails {
+ return &ProblemDetails{
+ Type: BadCSRProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
diff --git a/vendor/github.com/letsencrypt/boulder/revocation/reasons.go b/vendor/github.com/letsencrypt/boulder/revocation/reasons.go
new file mode 100644
index 000000000..a5b3f0807
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/revocation/reasons.go
@@ -0,0 +1,74 @@
+package revocation
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "golang.org/x/crypto/ocsp"
+)
+
+// Reason is used to specify a certificate revocation reason
+type Reason int
+
+// ReasonToString provides a map from reason code to string
+var ReasonToString = map[Reason]string{
+ ocsp.Unspecified: "unspecified",
+ ocsp.KeyCompromise: "keyCompromise",
+ ocsp.CACompromise: "cACompromise",
+ ocsp.AffiliationChanged: "affiliationChanged",
+ ocsp.Superseded: "superseded",
+ ocsp.CessationOfOperation: "cessationOfOperation",
+ ocsp.CertificateHold: "certificateHold",
+ // 7 is unused
+ ocsp.RemoveFromCRL: "removeFromCRL",
+ ocsp.PrivilegeWithdrawn: "privilegeWithdrawn",
+ ocsp.AACompromise: "aAcompromise",
+}
+
+// UserAllowedReasons contains the subset of Reasons which users are
+// allowed to use
+var UserAllowedReasons = map[Reason]struct{}{
+ ocsp.Unspecified: {},
+ ocsp.KeyCompromise: {},
+ ocsp.AffiliationChanged: {},
+ ocsp.Superseded: {},
+ ocsp.CessationOfOperation: {},
+}
+
+// AdminAllowedReasons contains the subset of Reasons which admins are allowed
+// to use. Reasons not found here will soon be forbidden from appearing in CRLs
+// or OCSP responses by root programs.
+var AdminAllowedReasons = map[Reason]struct{}{
+ ocsp.Unspecified: {},
+ ocsp.KeyCompromise: {},
+ ocsp.AffiliationChanged: {},
+ ocsp.Superseded: {},
+ ocsp.CessationOfOperation: {},
+ ocsp.PrivilegeWithdrawn: {},
+}
+
+// UserAllowedReasonsMessage contains a string describing a list of user allowed
+// revocation reasons. This is useful when a revocation is rejected because it
+// is not a valid user supplied reason and the allowed values must be
+// communicated. This variable is populated during package initialization.
+var UserAllowedReasonsMessage = ""
+
+func init() {
+ // Build a slice of ints from the allowed reason codes.
+ // We want a slice because iterating `UserAllowedReasons` will change order
+ // and make the message unpredictable and cumbersome for unit testing.
+ // We use []int instead of []Reason so we can use sort.Ints without fuss.
+ var allowed []int
+ for reason := range UserAllowedReasons {
+ allowed = append(allowed, int(reason))
+ }
+ sort.Ints(allowed)
+
+ var reasonStrings []string
+ for _, reason := range allowed {
+ reasonStrings = append(reasonStrings, fmt.Sprintf("%s (%d)",
+ ReasonToString[Reason(reason)], reason))
+ }
+ UserAllowedReasonsMessage = strings.Join(reasonStrings, ", ")
+}
diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go
new file mode 100644
index 000000000..b88df399a
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go
@@ -0,0 +1,3449 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.15.6
+// source: sa.proto
+
+package proto
+
+import (
+ proto "github.com/letsencrypt/boulder/core/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type RegistrationID struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (x *RegistrationID) Reset() {
+ *x = RegistrationID{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegistrationID) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegistrationID) ProtoMessage() {}
+
+func (x *RegistrationID) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegistrationID.ProtoReflect.Descriptor instead.
+func (*RegistrationID) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *RegistrationID) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+type JSONWebKey struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Jwk []byte `protobuf:"bytes,1,opt,name=jwk,proto3" json:"jwk,omitempty"`
+}
+
+func (x *JSONWebKey) Reset() {
+ *x = JSONWebKey{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *JSONWebKey) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONWebKey) ProtoMessage() {}
+
+func (x *JSONWebKey) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use JSONWebKey.ProtoReflect.Descriptor instead.
+func (*JSONWebKey) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *JSONWebKey) GetJwk() []byte {
+ if x != nil {
+ return x.Jwk
+ }
+ return nil
+}
+
+type AuthorizationID struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (x *AuthorizationID) Reset() {
+ *x = AuthorizationID{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AuthorizationID) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AuthorizationID) ProtoMessage() {}
+
+func (x *AuthorizationID) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AuthorizationID.ProtoReflect.Descriptor instead.
+func (*AuthorizationID) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *AuthorizationID) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+type GetPendingAuthorizationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ IdentifierType string `protobuf:"bytes,2,opt,name=identifierType,proto3" json:"identifierType,omitempty"`
+ IdentifierValue string `protobuf:"bytes,3,opt,name=identifierValue,proto3" json:"identifierValue,omitempty"`
+ // Result must be valid until at least this Unix timestamp (nanos)
+ ValidUntil int64 `protobuf:"varint,4,opt,name=validUntil,proto3" json:"validUntil,omitempty"`
+}
+
+func (x *GetPendingAuthorizationRequest) Reset() {
+ *x = GetPendingAuthorizationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetPendingAuthorizationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetPendingAuthorizationRequest) ProtoMessage() {}
+
+func (x *GetPendingAuthorizationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetPendingAuthorizationRequest.ProtoReflect.Descriptor instead.
+func (*GetPendingAuthorizationRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *GetPendingAuthorizationRequest) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *GetPendingAuthorizationRequest) GetIdentifierType() string {
+ if x != nil {
+ return x.IdentifierType
+ }
+ return ""
+}
+
+func (x *GetPendingAuthorizationRequest) GetIdentifierValue() string {
+ if x != nil {
+ return x.IdentifierValue
+ }
+ return ""
+}
+
+func (x *GetPendingAuthorizationRequest) GetValidUntil() int64 {
+ if x != nil {
+ return x.ValidUntil
+ }
+ return 0
+}
+
+type GetValidAuthorizationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"`
+ Now int64 `protobuf:"varint,3,opt,name=now,proto3" json:"now,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *GetValidAuthorizationsRequest) Reset() {
+ *x = GetValidAuthorizationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetValidAuthorizationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetValidAuthorizationsRequest) ProtoMessage() {}
+
+func (x *GetValidAuthorizationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetValidAuthorizationsRequest.ProtoReflect.Descriptor instead.
+func (*GetValidAuthorizationsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *GetValidAuthorizationsRequest) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *GetValidAuthorizationsRequest) GetDomains() []string {
+ if x != nil {
+ return x.Domains
+ }
+ return nil
+}
+
+func (x *GetValidAuthorizationsRequest) GetNow() int64 {
+ if x != nil {
+ return x.Now
+ }
+ return 0
+}
+
+type ValidAuthorizations struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Valid []*ValidAuthorizations_MapElement `protobuf:"bytes,1,rep,name=valid,proto3" json:"valid,omitempty"`
+}
+
+func (x *ValidAuthorizations) Reset() {
+ *x = ValidAuthorizations{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidAuthorizations) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidAuthorizations) ProtoMessage() {}
+
+func (x *ValidAuthorizations) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidAuthorizations.ProtoReflect.Descriptor instead.
+func (*ValidAuthorizations) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ValidAuthorizations) GetValid() []*ValidAuthorizations_MapElement {
+ if x != nil {
+ return x.Valid
+ }
+ return nil
+}
+
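+ // Note: ValidAuthorizations models its domain-to-authorization mapping
+ // as a repeated ValidAuthorizations_MapElement (defined near the end of
+ // this file) rather than a proto3 map field; unlike a map, a repeated
+ // field preserves element order on the wire.
+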
+type Serial struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
+}
+
+func (x *Serial) Reset() {
+ *x = Serial{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Serial) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Serial) ProtoMessage() {}
+
+func (x *Serial) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Serial.ProtoReflect.Descriptor instead.
+func (*Serial) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Serial) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+type SerialMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
+ RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Created int64 `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` // Unix timestamp (nanoseconds)
+ Expires int64 `protobuf:"varint,4,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *SerialMetadata) Reset() {
+ *x = SerialMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SerialMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SerialMetadata) ProtoMessage() {}
+
+func (x *SerialMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SerialMetadata.ProtoReflect.Descriptor instead.
+func (*SerialMetadata) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *SerialMetadata) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+func (x *SerialMetadata) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *SerialMetadata) GetCreated() int64 {
+ if x != nil {
+ return x.Created
+ }
+ return 0
+}
+
+func (x *SerialMetadata) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+type Range struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Earliest int64 `protobuf:"varint,1,opt,name=earliest,proto3" json:"earliest,omitempty"` // Unix timestamp (nanoseconds)
+ Latest int64 `protobuf:"varint,2,opt,name=latest,proto3" json:"latest,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *Range) Reset() {
+ *x = Range{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Range) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Range) ProtoMessage() {}
+
+func (x *Range) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Range.ProtoReflect.Descriptor instead.
+func (*Range) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Range) GetEarliest() int64 {
+ if x != nil {
+ return x.Earliest
+ }
+ return 0
+}
+
+func (x *Range) GetLatest() int64 {
+ if x != nil {
+ return x.Latest
+ }
+ return 0
+}
+
+type Count struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+}
+
+func (x *Count) Reset() {
+ *x = Count{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Count) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Count) ProtoMessage() {}
+
+func (x *Count) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Count.ProtoReflect.Descriptor instead.
+func (*Count) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *Count) GetCount() int64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+type CountCertificatesByNamesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Range *Range `protobuf:"bytes,1,opt,name=range,proto3" json:"range,omitempty"`
+ Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"`
+}
+
+func (x *CountCertificatesByNamesRequest) Reset() {
+ *x = CountCertificatesByNamesRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CountCertificatesByNamesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CountCertificatesByNamesRequest) ProtoMessage() {}
+
+func (x *CountCertificatesByNamesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CountCertificatesByNamesRequest.ProtoReflect.Descriptor instead.
+func (*CountCertificatesByNamesRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *CountCertificatesByNamesRequest) GetRange() *Range {
+ if x != nil {
+ return x.Range
+ }
+ return nil
+}
+
+func (x *CountCertificatesByNamesRequest) GetNames() []string {
+ if x != nil {
+ return x.Names
+ }
+ return nil
+}
+
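+ // Illustrative sketch (not part of the generated API): building a
+ // CountCertificatesByNamesRequest over the last 24 hours. The Range
+ // bounds are Unix timestamps in nanoseconds, per the field comments;
+ // "time" is assumed to be imported by the caller:
+ //
+ //	now := time.Now()
+ //	req := &CountCertificatesByNamesRequest{
+ //		Range: &Range{
+ //			Earliest: now.Add(-24 * time.Hour).UnixNano(),
+ //			Latest:   now.UnixNano(),
+ //		},
+ //		Names: []string{"example.com"},
+ //	}
+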
+type CountByNames struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Counts map[string]int64 `protobuf:"bytes,1,rep,name=counts,proto3" json:"counts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+}
+
+func (x *CountByNames) Reset() {
+ *x = CountByNames{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CountByNames) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CountByNames) ProtoMessage() {}
+
+func (x *CountByNames) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CountByNames.ProtoReflect.Descriptor instead.
+func (*CountByNames) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *CountByNames) GetCounts() map[string]int64 {
+ if x != nil {
+ return x.Counts
+ }
+ return nil
+}
+
+type CountRegistrationsByIPRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ip []byte `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"`
+ Range *Range `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"`
+}
+
+func (x *CountRegistrationsByIPRequest) Reset() {
+ *x = CountRegistrationsByIPRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CountRegistrationsByIPRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CountRegistrationsByIPRequest) ProtoMessage() {}
+
+func (x *CountRegistrationsByIPRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CountRegistrationsByIPRequest.ProtoReflect.Descriptor instead.
+func (*CountRegistrationsByIPRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *CountRegistrationsByIPRequest) GetIp() []byte {
+ if x != nil {
+ return x.Ip
+ }
+ return nil
+}
+
+func (x *CountRegistrationsByIPRequest) GetRange() *Range {
+ if x != nil {
+ return x.Range
+ }
+ return nil
+}
+
+type CountInvalidAuthorizationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"`
+ // Count authorizations that expire in this range.
+ Range *Range `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
+}
+
+func (x *CountInvalidAuthorizationsRequest) Reset() {
+ *x = CountInvalidAuthorizationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CountInvalidAuthorizationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CountInvalidAuthorizationsRequest) ProtoMessage() {}
+
+func (x *CountInvalidAuthorizationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CountInvalidAuthorizationsRequest.ProtoReflect.Descriptor instead.
+func (*CountInvalidAuthorizationsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *CountInvalidAuthorizationsRequest) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *CountInvalidAuthorizationsRequest) GetHostname() string {
+ if x != nil {
+ return x.Hostname
+ }
+ return ""
+}
+
+func (x *CountInvalidAuthorizationsRequest) GetRange() *Range {
+ if x != nil {
+ return x.Range
+ }
+ return nil
+}
+
+type CountOrdersRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AccountID int64 `protobuf:"varint,1,opt,name=accountID,proto3" json:"accountID,omitempty"`
+ Range *Range `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"`
+}
+
+func (x *CountOrdersRequest) Reset() {
+ *x = CountOrdersRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CountOrdersRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CountOrdersRequest) ProtoMessage() {}
+
+func (x *CountOrdersRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CountOrdersRequest.ProtoReflect.Descriptor instead.
+func (*CountOrdersRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *CountOrdersRequest) GetAccountID() int64 {
+ if x != nil {
+ return x.AccountID
+ }
+ return 0
+}
+
+func (x *CountOrdersRequest) GetRange() *Range {
+ if x != nil {
+ return x.Range
+ }
+ return nil
+}
+
+type CountFQDNSetsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Window int64 `protobuf:"varint,1,opt,name=window,proto3" json:"window,omitempty"`
+ Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"`
+}
+
+func (x *CountFQDNSetsRequest) Reset() {
+ *x = CountFQDNSetsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CountFQDNSetsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CountFQDNSetsRequest) ProtoMessage() {}
+
+func (x *CountFQDNSetsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CountFQDNSetsRequest.ProtoReflect.Descriptor instead.
+func (*CountFQDNSetsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *CountFQDNSetsRequest) GetWindow() int64 {
+ if x != nil {
+ return x.Window
+ }
+ return 0
+}
+
+func (x *CountFQDNSetsRequest) GetDomains() []string {
+ if x != nil {
+ return x.Domains
+ }
+ return nil
+}
+
+type FQDNSetExistsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Domains []string `protobuf:"bytes,1,rep,name=domains,proto3" json:"domains,omitempty"`
+}
+
+func (x *FQDNSetExistsRequest) Reset() {
+ *x = FQDNSetExistsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FQDNSetExistsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FQDNSetExistsRequest) ProtoMessage() {}
+
+func (x *FQDNSetExistsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FQDNSetExistsRequest.ProtoReflect.Descriptor instead.
+func (*FQDNSetExistsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *FQDNSetExistsRequest) GetDomains() []string {
+ if x != nil {
+ return x.Domains
+ }
+ return nil
+}
+
+type PreviousCertificateExistsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"`
+ RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"`
+}
+
+func (x *PreviousCertificateExistsRequest) Reset() {
+ *x = PreviousCertificateExistsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PreviousCertificateExistsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreviousCertificateExistsRequest) ProtoMessage() {}
+
+func (x *PreviousCertificateExistsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreviousCertificateExistsRequest.ProtoReflect.Descriptor instead.
+func (*PreviousCertificateExistsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *PreviousCertificateExistsRequest) GetDomain() string {
+ if x != nil {
+ return x.Domain
+ }
+ return ""
+}
+
+func (x *PreviousCertificateExistsRequest) GetRegID() int64 {
+ if x != nil {
+ return x.RegID
+ }
+ return 0
+}
+
+type Exists struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"`
+}
+
+func (x *Exists) Reset() {
+ *x = Exists{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Exists) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Exists) ProtoMessage() {}
+
+func (x *Exists) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Exists.ProtoReflect.Descriptor instead.
+func (*Exists) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *Exists) GetExists() bool {
+ if x != nil {
+ return x.Exists
+ }
+ return false
+}
+
+type AddSerialRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegID int64 `protobuf:"varint,1,opt,name=regID,proto3" json:"regID,omitempty"`
+ Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"`
+ Created int64 `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` // Unix timestamp (nanoseconds)
+ Expires int64 `protobuf:"varint,4,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *AddSerialRequest) Reset() {
+ *x = AddSerialRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddSerialRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddSerialRequest) ProtoMessage() {}
+
+func (x *AddSerialRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddSerialRequest.ProtoReflect.Descriptor instead.
+func (*AddSerialRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *AddSerialRequest) GetRegID() int64 {
+ if x != nil {
+ return x.RegID
+ }
+ return 0
+}
+
+func (x *AddSerialRequest) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+func (x *AddSerialRequest) GetCreated() int64 {
+ if x != nil {
+ return x.Created
+ }
+ return 0
+}
+
+func (x *AddSerialRequest) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+type AddCertificateRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Der []byte `protobuf:"bytes,1,opt,name=der,proto3" json:"der,omitempty"`
+ RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"`
+ // A signed OCSP response for the certificate contained in "der".
+ // Note: The certificate status in the OCSP response is assumed to be 0 (good).
+ Ocsp []byte `protobuf:"bytes,3,opt,name=ocsp,proto3" json:"ocsp,omitempty"`
+ // The issued time. When not present, the SA defaults to using
+ // the current time. The orphan-finder uses this parameter to add
+ // certificates with the correct historic issued date.
+ Issued int64 `protobuf:"varint,4,opt,name=issued,proto3" json:"issued,omitempty"`
+ IssuerID int64 `protobuf:"varint,5,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
+}
+
+func (x *AddCertificateRequest) Reset() {
+ *x = AddCertificateRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddCertificateRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddCertificateRequest) ProtoMessage() {}
+
+func (x *AddCertificateRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddCertificateRequest.ProtoReflect.Descriptor instead.
+func (*AddCertificateRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *AddCertificateRequest) GetDer() []byte {
+ if x != nil {
+ return x.Der
+ }
+ return nil
+}
+
+func (x *AddCertificateRequest) GetRegID() int64 {
+ if x != nil {
+ return x.RegID
+ }
+ return 0
+}
+
+func (x *AddCertificateRequest) GetOcsp() []byte {
+ if x != nil {
+ return x.Ocsp
+ }
+ return nil
+}
+
+func (x *AddCertificateRequest) GetIssued() int64 {
+ if x != nil {
+ return x.Issued
+ }
+ return 0
+}
+
+func (x *AddCertificateRequest) GetIssuerID() int64 {
+ if x != nil {
+ return x.IssuerID
+ }
+ return 0
+}
+
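+ // Illustrative sketch (not part of the generated API): a minimal
+ // AddCertificateRequest. derBytes and ocspBytes are assumed variables
+ // holding the raw DER certificate and its signed OCSP response; leaving
+ // Issued at its zero value lets the SA default to the current time, per
+ // the field comment above:
+ //
+ //	req := &AddCertificateRequest{
+ //		Der:   derBytes,
+ //		RegID: 1234,
+ //		Ocsp:  ocspBytes,
+ //	}
+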
+type AddCertificateResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"`
+}
+
+func (x *AddCertificateResponse) Reset() {
+ *x = AddCertificateResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddCertificateResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddCertificateResponse) ProtoMessage() {}
+
+func (x *AddCertificateResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddCertificateResponse.ProtoReflect.Descriptor instead.
+func (*AddCertificateResponse) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *AddCertificateResponse) GetDigest() string {
+ if x != nil {
+ return x.Digest
+ }
+ return ""
+}
+
+type OrderRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (x *OrderRequest) Reset() {
+ *x = OrderRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OrderRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OrderRequest) ProtoMessage() {}
+
+func (x *OrderRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OrderRequest.ProtoReflect.Descriptor instead.
+func (*OrderRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *OrderRequest) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+type NewOrderRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Expires int64 `protobuf:"varint,2,opt,name=expires,proto3" json:"expires,omitempty"`
+ Names []string `protobuf:"bytes,3,rep,name=names,proto3" json:"names,omitempty"`
+ V2Authorizations []int64 `protobuf:"varint,4,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"`
+}
+
+func (x *NewOrderRequest) Reset() {
+ *x = NewOrderRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NewOrderRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NewOrderRequest) ProtoMessage() {}
+
+func (x *NewOrderRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NewOrderRequest.ProtoReflect.Descriptor instead.
+func (*NewOrderRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *NewOrderRequest) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *NewOrderRequest) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+func (x *NewOrderRequest) GetNames() []string {
+ if x != nil {
+ return x.Names
+ }
+ return nil
+}
+
+func (x *NewOrderRequest) GetV2Authorizations() []int64 {
+ if x != nil {
+ return x.V2Authorizations
+ }
+ return nil
+}
+
+type NewOrderAndAuthzsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NewOrder *NewOrderRequest `protobuf:"bytes,1,opt,name=newOrder,proto3" json:"newOrder,omitempty"`
+ NewAuthzs []*proto.Authorization `protobuf:"bytes,2,rep,name=newAuthzs,proto3" json:"newAuthzs,omitempty"`
+}
+
+func (x *NewOrderAndAuthzsRequest) Reset() {
+ *x = NewOrderAndAuthzsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NewOrderAndAuthzsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NewOrderAndAuthzsRequest) ProtoMessage() {}
+
+func (x *NewOrderAndAuthzsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NewOrderAndAuthzsRequest.ProtoReflect.Descriptor instead.
+func (*NewOrderAndAuthzsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *NewOrderAndAuthzsRequest) GetNewOrder() *NewOrderRequest {
+ if x != nil {
+ return x.NewOrder
+ }
+ return nil
+}
+
+func (x *NewOrderAndAuthzsRequest) GetNewAuthzs() []*proto.Authorization {
+ if x != nil {
+ return x.NewAuthzs
+ }
+ return nil
+}
+
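+ // Illustrative sketch (not part of the generated API): submitting a new
+ // order together with its not-yet-persisted authorizations in a single
+ // request. expiresNanos and pendingAuthz are assumed to be supplied by
+ // the caller:
+ //
+ //	req := &NewOrderAndAuthzsRequest{
+ //		NewOrder: &NewOrderRequest{
+ //			RegistrationID: 1234,
+ //			Expires:        expiresNanos, // Unix nanoseconds, matching the other timestamp fields
+ //			Names:          []string{"example.com"},
+ //		},
+ //		NewAuthzs: []*proto.Authorization{pendingAuthz},
+ //	}
+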
+type SetOrderErrorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Error *proto.ProblemDetails `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *SetOrderErrorRequest) Reset() {
+ *x = SetOrderErrorRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SetOrderErrorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SetOrderErrorRequest) ProtoMessage() {}
+
+func (x *SetOrderErrorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SetOrderErrorRequest.ProtoReflect.Descriptor instead.
+func (*SetOrderErrorRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *SetOrderErrorRequest) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *SetOrderErrorRequest) GetError() *proto.ProblemDetails {
+ if x != nil {
+ return x.Error
+ }
+ return nil
+}
+
+type GetValidOrderAuthorizationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ AcctID int64 `protobuf:"varint,2,opt,name=acctID,proto3" json:"acctID,omitempty"`
+}
+
+func (x *GetValidOrderAuthorizationsRequest) Reset() {
+ *x = GetValidOrderAuthorizationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetValidOrderAuthorizationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetValidOrderAuthorizationsRequest) ProtoMessage() {}
+
+func (x *GetValidOrderAuthorizationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetValidOrderAuthorizationsRequest.ProtoReflect.Descriptor instead.
+func (*GetValidOrderAuthorizationsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *GetValidOrderAuthorizationsRequest) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *GetValidOrderAuthorizationsRequest) GetAcctID() int64 {
+ if x != nil {
+ return x.AcctID
+ }
+ return 0
+}
+
+type GetOrderForNamesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AcctID int64 `protobuf:"varint,1,opt,name=acctID,proto3" json:"acctID,omitempty"`
+ Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"`
+}
+
+func (x *GetOrderForNamesRequest) Reset() {
+ *x = GetOrderForNamesRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetOrderForNamesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetOrderForNamesRequest) ProtoMessage() {}
+
+func (x *GetOrderForNamesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetOrderForNamesRequest.ProtoReflect.Descriptor instead.
+func (*GetOrderForNamesRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{27}
+}
+
+func (x *GetOrderForNamesRequest) GetAcctID() int64 {
+ if x != nil {
+ return x.AcctID
+ }
+ return 0
+}
+
+func (x *GetOrderForNamesRequest) GetNames() []string {
+ if x != nil {
+ return x.Names
+ }
+ return nil
+}
+
+type FinalizeOrderRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ CertificateSerial string `protobuf:"bytes,2,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"`
+}
+
+func (x *FinalizeOrderRequest) Reset() {
+ *x = FinalizeOrderRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FinalizeOrderRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FinalizeOrderRequest) ProtoMessage() {}
+
+func (x *FinalizeOrderRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FinalizeOrderRequest.ProtoReflect.Descriptor instead.
+func (*FinalizeOrderRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *FinalizeOrderRequest) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *FinalizeOrderRequest) GetCertificateSerial() string {
+ if x != nil {
+ return x.CertificateSerial
+ }
+ return ""
+}
+
+type GetAuthorizationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"`
+ Now int64 `protobuf:"varint,3,opt,name=now,proto3" json:"now,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *GetAuthorizationsRequest) Reset() {
+ *x = GetAuthorizationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetAuthorizationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetAuthorizationsRequest) ProtoMessage() {}
+
+func (x *GetAuthorizationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetAuthorizationsRequest.ProtoReflect.Descriptor instead.
+func (*GetAuthorizationsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{29}
+}
+
+func (x *GetAuthorizationsRequest) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *GetAuthorizationsRequest) GetDomains() []string {
+ if x != nil {
+ return x.Domains
+ }
+ return nil
+}
+
+func (x *GetAuthorizationsRequest) GetNow() int64 {
+ if x != nil {
+ return x.Now
+ }
+ return 0
+}
+
+type Authorizations struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Authz []*Authorizations_MapElement `protobuf:"bytes,1,rep,name=authz,proto3" json:"authz,omitempty"`
+}
+
+func (x *Authorizations) Reset() {
+ *x = Authorizations{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Authorizations) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Authorizations) ProtoMessage() {}
+
+func (x *Authorizations) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Authorizations.ProtoReflect.Descriptor instead.
+func (*Authorizations) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *Authorizations) GetAuthz() []*Authorizations_MapElement {
+ if x != nil {
+ return x.Authz
+ }
+ return nil
+}
+
+type AddPendingAuthorizationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Authz []*proto.Authorization `protobuf:"bytes,1,rep,name=authz,proto3" json:"authz,omitempty"`
+}
+
+func (x *AddPendingAuthorizationsRequest) Reset() {
+ *x = AddPendingAuthorizationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddPendingAuthorizationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddPendingAuthorizationsRequest) ProtoMessage() {}
+
+func (x *AddPendingAuthorizationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[31]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddPendingAuthorizationsRequest.ProtoReflect.Descriptor instead.
+func (*AddPendingAuthorizationsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{31}
+}
+
+func (x *AddPendingAuthorizationsRequest) GetAuthz() []*proto.Authorization {
+ if x != nil {
+ return x.Authz
+ }
+ return nil
+}
+
+type AuthorizationIDs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"`
+}
+
+func (x *AuthorizationIDs) Reset() {
+ *x = AuthorizationIDs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AuthorizationIDs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AuthorizationIDs) ProtoMessage() {}
+
+func (x *AuthorizationIDs) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[32]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AuthorizationIDs.ProtoReflect.Descriptor instead.
+func (*AuthorizationIDs) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *AuthorizationIDs) GetIds() []string {
+ if x != nil {
+ return x.Ids
+ }
+ return nil
+}
+
+type AuthorizationID2 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (x *AuthorizationID2) Reset() {
+ *x = AuthorizationID2{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AuthorizationID2) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AuthorizationID2) ProtoMessage() {}
+
+func (x *AuthorizationID2) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AuthorizationID2.ProtoReflect.Descriptor instead.
+func (*AuthorizationID2) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *AuthorizationID2) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+type Authorization2IDs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ids []int64 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids,omitempty"`
+}
+
+func (x *Authorization2IDs) Reset() {
+ *x = Authorization2IDs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Authorization2IDs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Authorization2IDs) ProtoMessage() {}
+
+func (x *Authorization2IDs) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Authorization2IDs.ProtoReflect.Descriptor instead.
+func (*Authorization2IDs) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{34}
+}
+
+func (x *Authorization2IDs) GetIds() []int64 {
+ if x != nil {
+ return x.Ids
+ }
+ return nil
+}
+
+type RevokeCertificateRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
+ Reason int64 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"`
+ Date int64 `protobuf:"varint,3,opt,name=date,proto3" json:"date,omitempty"` // Unix timestamp (nanoseconds)
+ Backdate int64 `protobuf:"varint,5,opt,name=backdate,proto3" json:"backdate,omitempty"` // Unix timestamp (nanoseconds)
+ Response []byte `protobuf:"bytes,4,opt,name=response,proto3" json:"response,omitempty"`
+}
+
+func (x *RevokeCertificateRequest) Reset() {
+ *x = RevokeCertificateRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RevokeCertificateRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RevokeCertificateRequest) ProtoMessage() {}
+
+func (x *RevokeCertificateRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RevokeCertificateRequest.ProtoReflect.Descriptor instead.
+func (*RevokeCertificateRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{35}
+}
+
+func (x *RevokeCertificateRequest) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+func (x *RevokeCertificateRequest) GetReason() int64 {
+ if x != nil {
+ return x.Reason
+ }
+ return 0
+}
+
+func (x *RevokeCertificateRequest) GetDate() int64 {
+ if x != nil {
+ return x.Date
+ }
+ return 0
+}
+
+func (x *RevokeCertificateRequest) GetBackdate() int64 {
+ if x != nil {
+ return x.Backdate
+ }
+ return 0
+}
+
+func (x *RevokeCertificateRequest) GetResponse() []byte {
+ if x != nil {
+ return x.Response
+ }
+ return nil
+}
+
+type FinalizeAuthorizationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+ Expires int64 `protobuf:"varint,3,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
+ Attempted string `protobuf:"bytes,4,opt,name=attempted,proto3" json:"attempted,omitempty"`
+ ValidationRecords []*proto.ValidationRecord `protobuf:"bytes,5,rep,name=validationRecords,proto3" json:"validationRecords,omitempty"`
+ ValidationError *proto.ProblemDetails `protobuf:"bytes,6,opt,name=validationError,proto3" json:"validationError,omitempty"`
+ AttemptedAt int64 `protobuf:"varint,7,opt,name=attemptedAt,proto3" json:"attemptedAt,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *FinalizeAuthorizationRequest) Reset() {
+ *x = FinalizeAuthorizationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FinalizeAuthorizationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FinalizeAuthorizationRequest) ProtoMessage() {}
+
+func (x *FinalizeAuthorizationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[36]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FinalizeAuthorizationRequest.ProtoReflect.Descriptor instead.
+func (*FinalizeAuthorizationRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{36}
+}
+
+func (x *FinalizeAuthorizationRequest) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *FinalizeAuthorizationRequest) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+func (x *FinalizeAuthorizationRequest) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+func (x *FinalizeAuthorizationRequest) GetAttempted() string {
+ if x != nil {
+ return x.Attempted
+ }
+ return ""
+}
+
+func (x *FinalizeAuthorizationRequest) GetValidationRecords() []*proto.ValidationRecord {
+ if x != nil {
+ return x.ValidationRecords
+ }
+ return nil
+}
+
+func (x *FinalizeAuthorizationRequest) GetValidationError() *proto.ProblemDetails {
+ if x != nil {
+ return x.ValidationError
+ }
+ return nil
+}
+
+func (x *FinalizeAuthorizationRequest) GetAttemptedAt() int64 {
+ if x != nil {
+ return x.AttemptedAt
+ }
+ return 0
+}
+
+type AddBlockedKeyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ KeyHash []byte `protobuf:"bytes,1,opt,name=keyHash,proto3" json:"keyHash,omitempty"`
+ Added int64 `protobuf:"varint,2,opt,name=added,proto3" json:"added,omitempty"` // Unix timestamp (nanoseconds)
+ Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"`
+ Comment string `protobuf:"bytes,4,opt,name=comment,proto3" json:"comment,omitempty"`
+ RevokedBy int64 `protobuf:"varint,5,opt,name=revokedBy,proto3" json:"revokedBy,omitempty"`
+}
+
+func (x *AddBlockedKeyRequest) Reset() {
+ *x = AddBlockedKeyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddBlockedKeyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddBlockedKeyRequest) ProtoMessage() {}
+
+func (x *AddBlockedKeyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddBlockedKeyRequest.ProtoReflect.Descriptor instead.
+func (*AddBlockedKeyRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{37}
+}
+
+func (x *AddBlockedKeyRequest) GetKeyHash() []byte {
+ if x != nil {
+ return x.KeyHash
+ }
+ return nil
+}
+
+func (x *AddBlockedKeyRequest) GetAdded() int64 {
+ if x != nil {
+ return x.Added
+ }
+ return 0
+}
+
+func (x *AddBlockedKeyRequest) GetSource() string {
+ if x != nil {
+ return x.Source
+ }
+ return ""
+}
+
+func (x *AddBlockedKeyRequest) GetComment() string {
+ if x != nil {
+ return x.Comment
+ }
+ return ""
+}
+
+func (x *AddBlockedKeyRequest) GetRevokedBy() int64 {
+ if x != nil {
+ return x.RevokedBy
+ }
+ return 0
+}
+
+type KeyBlockedRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ KeyHash []byte `protobuf:"bytes,1,opt,name=keyHash,proto3" json:"keyHash,omitempty"`
+}
+
+func (x *KeyBlockedRequest) Reset() {
+ *x = KeyBlockedRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeyBlockedRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyBlockedRequest) ProtoMessage() {}
+
+func (x *KeyBlockedRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[38]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyBlockedRequest.ProtoReflect.Descriptor instead.
+func (*KeyBlockedRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{38}
+}
+
+func (x *KeyBlockedRequest) GetKeyHash() []byte {
+ if x != nil {
+ return x.KeyHash
+ }
+ return nil
+}
+
+type ValidAuthorizations_MapElement struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"`
+ Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"`
+}
+
+func (x *ValidAuthorizations_MapElement) Reset() {
+ *x = ValidAuthorizations_MapElement{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidAuthorizations_MapElement) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidAuthorizations_MapElement) ProtoMessage() {}
+
+func (x *ValidAuthorizations_MapElement) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[39]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidAuthorizations_MapElement.ProtoReflect.Descriptor instead.
+func (*ValidAuthorizations_MapElement) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{5, 0}
+}
+
+func (x *ValidAuthorizations_MapElement) GetDomain() string {
+ if x != nil {
+ return x.Domain
+ }
+ return ""
+}
+
+func (x *ValidAuthorizations_MapElement) GetAuthz() *proto.Authorization {
+ if x != nil {
+ return x.Authz
+ }
+ return nil
+}
+
+type Authorizations_MapElement struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"`
+ Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"`
+}
+
+func (x *Authorizations_MapElement) Reset() {
+ *x = Authorizations_MapElement{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Authorizations_MapElement) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Authorizations_MapElement) ProtoMessage() {}
+
+func (x *Authorizations_MapElement) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[41]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Authorizations_MapElement.ProtoReflect.Descriptor instead.
+func (*Authorizations_MapElement) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{30, 0}
+}
+
+func (x *Authorizations_MapElement) GetDomain() string {
+ if x != nil {
+ return x.Domain
+ }
+ return ""
+}
+
+func (x *Authorizations_MapElement) GetAuthz() *proto.Authorization {
+ if x != nil {
+ return x.Authz
+ }
+ return nil
+}
+
+var File_sa_proto protoreflect.FileDescriptor
+
+var file_sa_proto_rawDesc = []byte{
+ 0x0a, 0x08, 0x73, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x73, 0x61, 0x1a, 0x15,
+ 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x22, 0x20, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x02, 0x69, 0x64, 0x22, 0x1e, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, 0x4b,
+ 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6a, 0x77, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x03, 0x6a, 0x77, 0x6b, 0x22, 0x21, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0xba, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x50,
+ 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65,
+ 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
+ 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x69, 0x64,
+ 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x55, 0x6e, 0x74,
+ 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x55,
+ 0x6e, 0x74, 0x69, 0x6c, 0x22, 0x73, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64,
+ 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72,
+ 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a,
+ 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07,
+ 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6e, 0x6f, 0x77, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x1a, 0x4f, 0x0a, 0x0a, 0x4d,
+ 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d,
+ 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69,
+ 0x6e, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x22, 0x20, 0x0a, 0x06,
+ 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x84,
+ 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67,
+ 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49,
+ 0x44, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65,
+ 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78,
+ 0x70, 0x69, 0x72, 0x65, 0x73, 0x22, 0x3b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1a,
+ 0x0a, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61,
+ 0x74, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6c, 0x61, 0x74, 0x65,
+ 0x73, 0x74, 0x22, 0x1d, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x22, 0x58, 0x0a, 0x1f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05,
+ 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x7f, 0x0a, 0x0c, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x61,
+ 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x2e, 0x43, 0x6f,
+ 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x1d,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a,
+ 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x70, 0x12, 0x1f, 0x0a,
+ 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73,
+ 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x88,
+ 0x01, 0x0a, 0x21, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65,
+ 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08,
+ 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e,
+ 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x53, 0x0a, 0x12, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1c, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x1f, 0x0a,
+ 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73,
+ 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x48,
+ 0x0a, 0x14, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18,
+ 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x22, 0x30, 0x0a, 0x14, 0x46, 0x51, 0x44, 0x4e,
+ 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x20, 0x50, 0x72,
+ 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16,
+ 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+ 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x22, 0x20, 0x0a, 0x06,
+ 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x74,
+ 0x0a, 0x10, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69,
+ 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c,
+ 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78,
+ 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70,
+ 0x69, 0x72, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10,
+ 0x0a, 0x03, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72,
+ 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x63, 0x73, 0x70, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6f, 0x63, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73,
+ 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75,
+ 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x22, 0x30,
+ 0x0a, 0x16, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65,
+ 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74,
+ 0x22, 0x1e, 0x0a, 0x0c, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64,
+ 0x22, 0x95, 0x01, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65,
+ 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07,
+ 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65,
+ 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x10,
+ 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x04, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x7e, 0x0a, 0x18, 0x4e, 0x65, 0x77, 0x4f,
+ 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f,
+ 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x6e, 0x65, 0x77,
+ 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68,
+ 0x7a, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6e,
+ 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x22, 0x52, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x4f,
+ 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64,
+ 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65,
+ 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x4c, 0x0a, 0x22,
+ 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74,
+ 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
+ 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x22, 0x47, 0x0a, 0x17, 0x47, 0x65,
+ 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a,
+ 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61,
+ 0x6d, 0x65, 0x73, 0x22, 0x54, 0x0a, 0x14, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f,
+ 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2c, 0x0a, 0x11, 0x63,
+ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x6e, 0x0a, 0x18, 0x47, 0x65, 0x74,
+ 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72,
+ 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a,
+ 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07,
+ 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6e, 0x6f, 0x77, 0x22, 0x96, 0x01, 0x0a, 0x0e, 0x41, 0x75,
+ 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x05,
+ 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x61,
+ 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68,
+ 0x7a, 0x1a, 0x4f, 0x0a, 0x0a, 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12,
+ 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75,
+ 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74,
+ 0x68, 0x7a, 0x22, 0x4c, 0x0a, 0x1f, 0x41, 0x64, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67,
+ 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a,
+ 0x22, 0x24, 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x49, 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x22, 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x25, 0x0a, 0x11, 0x41, 0x75,
+ 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x49, 0x44, 0x73, 0x12,
+ 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x03, 0x69, 0x64,
+ 0x73, 0x22, 0x96, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16,
+ 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+ 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x12,
+ 0x0a, 0x04, 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x64, 0x61,
+ 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a,
+ 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c,
+ 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa6, 0x02, 0x0a, 0x1c, 0x46,
+ 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x1c, 0x0a,
+ 0x09, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x12, 0x44, 0x0a, 0x11, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73,
+ 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x11,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64,
+ 0x73, 0x12, 0x3e, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45,
+ 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
+ 0x52, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x41, 0x74,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65,
+ 0x64, 0x41, 0x74, 0x22, 0x96, 0x01, 0x0a, 0x14, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07,
+ 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b,
+ 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c,
+ 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, 0x22, 0x2d, 0x0a, 0x11,
+ 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x32, 0xcd, 0x15, 0x0a, 0x10,
+ 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79,
+ 0x12, 0x3b, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52,
+ 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x3c, 0x0a,
+ 0x14, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x57,
+ 0x65, 0x62, 0x4b, 0x65, 0x79, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67,
+ 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x35, 0x0a, 0x11, 0x47,
+ 0x65, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x12, 0x2e, 0x73,
+ 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c,
+ 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x65, 0x63,
+ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e,
+ 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x14, 0x47,
+ 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a,
+ 0x17, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x18, 0x43, 0x6f,
+ 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42,
+ 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x23, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e,
+ 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e,
+ 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x61,
+ 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x00, 0x12,
+ 0x48, 0x0a, 0x16, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x12, 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73,
+ 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x1b, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42,
+ 0x79, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f,
+ 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61,
+ 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x32, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e,
+ 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x0d,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x12, 0x18, 0x2e,
+ 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x0d, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45,
+ 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x46, 0x51, 0x44, 0x4e, 0x53,
+ 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4f, 0x0a,
+ 0x19, 0x50, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x24, 0x2e, 0x73, 0x61, 0x2e,
+ 0x50, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x40,
+ 0x0a, 0x11, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
+ 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00,
+ 0x12, 0x48, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x41,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x18, 0x47, 0x65,
+ 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50,
+ 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+ 0x00, 0x12, 0x3e, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e,
+ 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32,
+ 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22,
+ 0x00, 0x12, 0x5c, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64,
+ 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x32, 0x12, 0x26, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f,
+ 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12,
+ 0x51, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x25,
+ 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74,
+ 0x22, 0x00, 0x12, 0x52, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75,
+ 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x21, 0x2e,
+ 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x65, 0x64, 0x12, 0x15, 0x2e, 0x73, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, 0x61,
+ 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0f, 0x4e, 0x65, 0x77,
+ 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0e, 0x41, 0x64,
+ 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x73,
+ 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64,
+ 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x72, 0x65, 0x63,
+ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x73, 0x61, 0x2e,
+ 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12,
+ 0x3b, 0x0a, 0x09, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x14, 0x2e, 0x73,
+ 0x61, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x16,
+ 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70,
+ 0x74, 0x79, 0x22, 0x00, 0x12, 0x2e, 0x0a, 0x08, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72,
+ 0x12, 0x13, 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64,
+ 0x65, 0x72, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x11, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72,
+ 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x4e,
+ 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f,
+ 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64,
+ 0x65, 0x72, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x10, 0x2e, 0x73,
+ 0x61, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x4f,
+ 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x53,
+ 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a,
+ 0x0d, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x18,
+ 0x2e, 0x73, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65,
+ 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
+ 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x10,
+ 0x2e, 0x73, 0x61, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12,
+ 0x3e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61,
+ 0x6d, 0x65, 0x73, 0x12, 0x1b, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65,
+ 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12,
+ 0x4b, 0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65,
+ 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x18,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65,
+ 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00,
+ 0x12, 0x52, 0x0a, 0x12, 0x4e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x23, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x50,
+ 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x73, 0x61,
+ 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x49,
+ 0x44, 0x73, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x16, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65,
+ 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x20,
+ 0x2e, 0x73, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x18, 0x44, 0x65,
+ 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x1a, 0x16, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
+ 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0d, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64,
+ 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, 0x29, 0x5a, 0x27, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x73, 0x61,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_sa_proto_rawDescOnce sync.Once
+ file_sa_proto_rawDescData = file_sa_proto_rawDesc
+)
+
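+// file_sa_proto_rawDescGZIP compresses the raw descriptor exactly once
+// (guarded by file_sa_proto_rawDescOnce) and caches the gzipped bytes for
+// the deprecated Descriptor() accessors above.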
+func file_sa_proto_rawDescGZIP() []byte {
+ file_sa_proto_rawDescOnce.Do(func() {
+ file_sa_proto_rawDescData = protoimpl.X.CompressGZIP(file_sa_proto_rawDescData)
+ })
+ return file_sa_proto_rawDescData
+}
+
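+// file_sa_proto_msgTypes holds runtime metadata for the 42 message types in
+// this file; file_sa_proto_goTypes lists the Go type behind every index used
+// by file_sa_proto_depIdxs, including imported core and google.protobuf types.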
+var file_sa_proto_msgTypes = make([]protoimpl.MessageInfo, 42)
+var file_sa_proto_goTypes = []interface{}{
+ (*RegistrationID)(nil), // 0: sa.RegistrationID
+ (*JSONWebKey)(nil), // 1: sa.JSONWebKey
+ (*AuthorizationID)(nil), // 2: sa.AuthorizationID
+ (*GetPendingAuthorizationRequest)(nil), // 3: sa.GetPendingAuthorizationRequest
+ (*GetValidAuthorizationsRequest)(nil), // 4: sa.GetValidAuthorizationsRequest
+ (*ValidAuthorizations)(nil), // 5: sa.ValidAuthorizations
+ (*Serial)(nil), // 6: sa.Serial
+ (*SerialMetadata)(nil), // 7: sa.SerialMetadata
+ (*Range)(nil), // 8: sa.Range
+ (*Count)(nil), // 9: sa.Count
+ (*CountCertificatesByNamesRequest)(nil), // 10: sa.CountCertificatesByNamesRequest
+ (*CountByNames)(nil), // 11: sa.CountByNames
+ (*CountRegistrationsByIPRequest)(nil), // 12: sa.CountRegistrationsByIPRequest
+ (*CountInvalidAuthorizationsRequest)(nil), // 13: sa.CountInvalidAuthorizationsRequest
+ (*CountOrdersRequest)(nil), // 14: sa.CountOrdersRequest
+ (*CountFQDNSetsRequest)(nil), // 15: sa.CountFQDNSetsRequest
+ (*FQDNSetExistsRequest)(nil), // 16: sa.FQDNSetExistsRequest
+ (*PreviousCertificateExistsRequest)(nil), // 17: sa.PreviousCertificateExistsRequest
+ (*Exists)(nil), // 18: sa.Exists
+ (*AddSerialRequest)(nil), // 19: sa.AddSerialRequest
+ (*AddCertificateRequest)(nil), // 20: sa.AddCertificateRequest
+ (*AddCertificateResponse)(nil), // 21: sa.AddCertificateResponse
+ (*OrderRequest)(nil), // 22: sa.OrderRequest
+ (*NewOrderRequest)(nil), // 23: sa.NewOrderRequest
+ (*NewOrderAndAuthzsRequest)(nil), // 24: sa.NewOrderAndAuthzsRequest
+ (*SetOrderErrorRequest)(nil), // 25: sa.SetOrderErrorRequest
+ (*GetValidOrderAuthorizationsRequest)(nil), // 26: sa.GetValidOrderAuthorizationsRequest
+ (*GetOrderForNamesRequest)(nil), // 27: sa.GetOrderForNamesRequest
+ (*FinalizeOrderRequest)(nil), // 28: sa.FinalizeOrderRequest
+ (*GetAuthorizationsRequest)(nil), // 29: sa.GetAuthorizationsRequest
+ (*Authorizations)(nil), // 30: sa.Authorizations
+ (*AddPendingAuthorizationsRequest)(nil), // 31: sa.AddPendingAuthorizationsRequest
+ (*AuthorizationIDs)(nil), // 32: sa.AuthorizationIDs
+ (*AuthorizationID2)(nil), // 33: sa.AuthorizationID2
+ (*Authorization2IDs)(nil), // 34: sa.Authorization2IDs
+ (*RevokeCertificateRequest)(nil), // 35: sa.RevokeCertificateRequest
+ (*FinalizeAuthorizationRequest)(nil), // 36: sa.FinalizeAuthorizationRequest
+ (*AddBlockedKeyRequest)(nil), // 37: sa.AddBlockedKeyRequest
+ (*KeyBlockedRequest)(nil), // 38: sa.KeyBlockedRequest
+ (*ValidAuthorizations_MapElement)(nil), // 39: sa.ValidAuthorizations.MapElement
+ nil, // 40: sa.CountByNames.CountsEntry
+ (*Authorizations_MapElement)(nil), // 41: sa.Authorizations.MapElement
+ (*proto.Authorization)(nil), // 42: core.Authorization
+ (*proto.ProblemDetails)(nil), // 43: core.ProblemDetails
+ (*proto.ValidationRecord)(nil), // 44: core.ValidationRecord
+ (*proto.Registration)(nil), // 45: core.Registration
+ (*proto.Certificate)(nil), // 46: core.Certificate
+ (*proto.CertificateStatus)(nil), // 47: core.CertificateStatus
+ (*emptypb.Empty)(nil), // 48: google.protobuf.Empty
+ (*proto.Order)(nil), // 49: core.Order
+}
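+// file_sa_proto_depIdxs resolves every cross-type reference (field types,
+// method inputs, method outputs) to an index in file_sa_proto_goTypes; the
+// five trailing entries mark where each sub-list begins and ends.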
+var file_sa_proto_depIdxs = []int32{
+ 39, // 0: sa.ValidAuthorizations.valid:type_name -> sa.ValidAuthorizations.MapElement
+ 8, // 1: sa.CountCertificatesByNamesRequest.range:type_name -> sa.Range
+ 40, // 2: sa.CountByNames.counts:type_name -> sa.CountByNames.CountsEntry
+ 8, // 3: sa.CountRegistrationsByIPRequest.range:type_name -> sa.Range
+ 8, // 4: sa.CountInvalidAuthorizationsRequest.range:type_name -> sa.Range
+ 8, // 5: sa.CountOrdersRequest.range:type_name -> sa.Range
+ 23, // 6: sa.NewOrderAndAuthzsRequest.newOrder:type_name -> sa.NewOrderRequest
+ 42, // 7: sa.NewOrderAndAuthzsRequest.newAuthzs:type_name -> core.Authorization
+ 43, // 8: sa.SetOrderErrorRequest.error:type_name -> core.ProblemDetails
+ 41, // 9: sa.Authorizations.authz:type_name -> sa.Authorizations.MapElement
+ 42, // 10: sa.AddPendingAuthorizationsRequest.authz:type_name -> core.Authorization
+ 44, // 11: sa.FinalizeAuthorizationRequest.validationRecords:type_name -> core.ValidationRecord
+ 43, // 12: sa.FinalizeAuthorizationRequest.validationError:type_name -> core.ProblemDetails
+ 42, // 13: sa.ValidAuthorizations.MapElement.authz:type_name -> core.Authorization
+ 42, // 14: sa.Authorizations.MapElement.authz:type_name -> core.Authorization
+ 0, // 15: sa.StorageAuthority.GetRegistration:input_type -> sa.RegistrationID
+ 1, // 16: sa.StorageAuthority.GetRegistrationByKey:input_type -> sa.JSONWebKey
+ 6, // 17: sa.StorageAuthority.GetSerialMetadata:input_type -> sa.Serial
+ 6, // 18: sa.StorageAuthority.GetCertificate:input_type -> sa.Serial
+ 6, // 19: sa.StorageAuthority.GetPrecertificate:input_type -> sa.Serial
+ 6, // 20: sa.StorageAuthority.GetCertificateStatus:input_type -> sa.Serial
+ 10, // 21: sa.StorageAuthority.CountCertificatesByNames:input_type -> sa.CountCertificatesByNamesRequest
+ 12, // 22: sa.StorageAuthority.CountRegistrationsByIP:input_type -> sa.CountRegistrationsByIPRequest
+ 12, // 23: sa.StorageAuthority.CountRegistrationsByIPRange:input_type -> sa.CountRegistrationsByIPRequest
+ 14, // 24: sa.StorageAuthority.CountOrders:input_type -> sa.CountOrdersRequest
+ 15, // 25: sa.StorageAuthority.CountFQDNSets:input_type -> sa.CountFQDNSetsRequest
+ 16, // 26: sa.StorageAuthority.FQDNSetExists:input_type -> sa.FQDNSetExistsRequest
+ 17, // 27: sa.StorageAuthority.PreviousCertificateExists:input_type -> sa.PreviousCertificateExistsRequest
+ 33, // 28: sa.StorageAuthority.GetAuthorization2:input_type -> sa.AuthorizationID2
+ 29, // 29: sa.StorageAuthority.GetAuthorizations2:input_type -> sa.GetAuthorizationsRequest
+ 3, // 30: sa.StorageAuthority.GetPendingAuthorization2:input_type -> sa.GetPendingAuthorizationRequest
+ 0, // 31: sa.StorageAuthority.CountPendingAuthorizations2:input_type -> sa.RegistrationID
+ 26, // 32: sa.StorageAuthority.GetValidOrderAuthorizations2:input_type -> sa.GetValidOrderAuthorizationsRequest
+ 13, // 33: sa.StorageAuthority.CountInvalidAuthorizations2:input_type -> sa.CountInvalidAuthorizationsRequest
+ 4, // 34: sa.StorageAuthority.GetValidAuthorizations2:input_type -> sa.GetValidAuthorizationsRequest
+ 38, // 35: sa.StorageAuthority.KeyBlocked:input_type -> sa.KeyBlockedRequest
+ 45, // 36: sa.StorageAuthority.NewRegistration:input_type -> core.Registration
+ 45, // 37: sa.StorageAuthority.UpdateRegistration:input_type -> core.Registration
+ 20, // 38: sa.StorageAuthority.AddCertificate:input_type -> sa.AddCertificateRequest
+ 20, // 39: sa.StorageAuthority.AddPrecertificate:input_type -> sa.AddCertificateRequest
+ 19, // 40: sa.StorageAuthority.AddSerial:input_type -> sa.AddSerialRequest
+ 0, // 41: sa.StorageAuthority.DeactivateRegistration:input_type -> sa.RegistrationID
+ 23, // 42: sa.StorageAuthority.NewOrder:input_type -> sa.NewOrderRequest
+ 24, // 43: sa.StorageAuthority.NewOrderAndAuthzs:input_type -> sa.NewOrderAndAuthzsRequest
+ 22, // 44: sa.StorageAuthority.SetOrderProcessing:input_type -> sa.OrderRequest
+ 25, // 45: sa.StorageAuthority.SetOrderError:input_type -> sa.SetOrderErrorRequest
+ 28, // 46: sa.StorageAuthority.FinalizeOrder:input_type -> sa.FinalizeOrderRequest
+ 22, // 47: sa.StorageAuthority.GetOrder:input_type -> sa.OrderRequest
+ 27, // 48: sa.StorageAuthority.GetOrderForNames:input_type -> sa.GetOrderForNamesRequest
+ 35, // 49: sa.StorageAuthority.RevokeCertificate:input_type -> sa.RevokeCertificateRequest
+ 35, // 50: sa.StorageAuthority.UpdateRevokedCertificate:input_type -> sa.RevokeCertificateRequest
+ 31, // 51: sa.StorageAuthority.NewAuthorizations2:input_type -> sa.AddPendingAuthorizationsRequest
+ 36, // 52: sa.StorageAuthority.FinalizeAuthorization2:input_type -> sa.FinalizeAuthorizationRequest
+ 33, // 53: sa.StorageAuthority.DeactivateAuthorization2:input_type -> sa.AuthorizationID2
+ 37, // 54: sa.StorageAuthority.AddBlockedKey:input_type -> sa.AddBlockedKeyRequest
+ 45, // 55: sa.StorageAuthority.GetRegistration:output_type -> core.Registration
+ 45, // 56: sa.StorageAuthority.GetRegistrationByKey:output_type -> core.Registration
+ 7, // 57: sa.StorageAuthority.GetSerialMetadata:output_type -> sa.SerialMetadata
+ 46, // 58: sa.StorageAuthority.GetCertificate:output_type -> core.Certificate
+ 46, // 59: sa.StorageAuthority.GetPrecertificate:output_type -> core.Certificate
+ 47, // 60: sa.StorageAuthority.GetCertificateStatus:output_type -> core.CertificateStatus
+ 11, // 61: sa.StorageAuthority.CountCertificatesByNames:output_type -> sa.CountByNames
+ 9, // 62: sa.StorageAuthority.CountRegistrationsByIP:output_type -> sa.Count
+ 9, // 63: sa.StorageAuthority.CountRegistrationsByIPRange:output_type -> sa.Count
+ 9, // 64: sa.StorageAuthority.CountOrders:output_type -> sa.Count
+ 9, // 65: sa.StorageAuthority.CountFQDNSets:output_type -> sa.Count
+ 18, // 66: sa.StorageAuthority.FQDNSetExists:output_type -> sa.Exists
+ 18, // 67: sa.StorageAuthority.PreviousCertificateExists:output_type -> sa.Exists
+ 42, // 68: sa.StorageAuthority.GetAuthorization2:output_type -> core.Authorization
+ 30, // 69: sa.StorageAuthority.GetAuthorizations2:output_type -> sa.Authorizations
+ 42, // 70: sa.StorageAuthority.GetPendingAuthorization2:output_type -> core.Authorization
+ 9, // 71: sa.StorageAuthority.CountPendingAuthorizations2:output_type -> sa.Count
+ 30, // 72: sa.StorageAuthority.GetValidOrderAuthorizations2:output_type -> sa.Authorizations
+ 9, // 73: sa.StorageAuthority.CountInvalidAuthorizations2:output_type -> sa.Count
+ 30, // 74: sa.StorageAuthority.GetValidAuthorizations2:output_type -> sa.Authorizations
+ 18, // 75: sa.StorageAuthority.KeyBlocked:output_type -> sa.Exists
+ 45, // 76: sa.StorageAuthority.NewRegistration:output_type -> core.Registration
+ 48, // 77: sa.StorageAuthority.UpdateRegistration:output_type -> google.protobuf.Empty
+ 21, // 78: sa.StorageAuthority.AddCertificate:output_type -> sa.AddCertificateResponse
+ 48, // 79: sa.StorageAuthority.AddPrecertificate:output_type -> google.protobuf.Empty
+ 48, // 80: sa.StorageAuthority.AddSerial:output_type -> google.protobuf.Empty
+ 48, // 81: sa.StorageAuthority.DeactivateRegistration:output_type -> google.protobuf.Empty
+ 49, // 82: sa.StorageAuthority.NewOrder:output_type -> core.Order
+ 49, // 83: sa.StorageAuthority.NewOrderAndAuthzs:output_type -> core.Order
+ 48, // 84: sa.StorageAuthority.SetOrderProcessing:output_type -> google.protobuf.Empty
+ 48, // 85: sa.StorageAuthority.SetOrderError:output_type -> google.protobuf.Empty
+ 48, // 86: sa.StorageAuthority.FinalizeOrder:output_type -> google.protobuf.Empty
+ 49, // 87: sa.StorageAuthority.GetOrder:output_type -> core.Order
+ 49, // 88: sa.StorageAuthority.GetOrderForNames:output_type -> core.Order
+ 48, // 89: sa.StorageAuthority.RevokeCertificate:output_type -> google.protobuf.Empty
+ 48, // 90: sa.StorageAuthority.UpdateRevokedCertificate:output_type -> google.protobuf.Empty
+ 34, // 91: sa.StorageAuthority.NewAuthorizations2:output_type -> sa.Authorization2IDs
+ 48, // 92: sa.StorageAuthority.FinalizeAuthorization2:output_type -> google.protobuf.Empty
+ 48, // 93: sa.StorageAuthority.DeactivateAuthorization2:output_type -> google.protobuf.Empty
+ 48, // 94: sa.StorageAuthority.AddBlockedKey:output_type -> google.protobuf.Empty
+ 55, // [55:95] is the sub-list for method output_type
+ 15, // [15:55] is the sub-list for method input_type
+ 15, // [15:15] is the sub-list for extension type_name
+ 15, // [15:15] is the sub-list for extension extendee
+ 0, // [0:15] is the sub-list for field type_name
+}
+
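+// file_sa_proto_init registers the file with the protobuf runtime at package
+// load. When the unsafe-pointer fast path is unavailable, the Exporter
+// closures below let the runtime reach each message's unexported state,
+// sizeCache, and unknownFields bookkeeping fields.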
+func init() { file_sa_proto_init() }
+func file_sa_proto_init() {
+ if File_sa_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_sa_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegistrationID); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*JSONWebKey); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AuthorizationID); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetPendingAuthorizationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetValidAuthorizationsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidAuthorizations); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Serial); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SerialMetadata); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Range); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Count); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CountCertificatesByNamesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CountByNames); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CountRegistrationsByIPRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CountInvalidAuthorizationsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CountOrdersRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CountFQDNSetsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FQDNSetExistsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PreviousCertificateExistsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Exists); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddSerialRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddCertificateRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddCertificateResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OrderRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NewOrderRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NewOrderAndAuthzsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SetOrderErrorRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetValidOrderAuthorizationsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetOrderForNamesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FinalizeOrderRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetAuthorizationsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Authorizations); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddPendingAuthorizationsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AuthorizationIDs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AuthorizationID2); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Authorization2IDs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RevokeCertificateRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FinalizeAuthorizationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddBlockedKeyRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeyBlockedRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidAuthorizations_MapElement); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Authorizations_MapElement); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_sa_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 42,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_sa_proto_goTypes,
+ DependencyIndexes: file_sa_proto_depIdxs,
+ MessageInfos: file_sa_proto_msgTypes,
+ }.Build()
+ File_sa_proto = out.File
+ file_sa_proto_rawDesc = nil
+ file_sa_proto_goTypes = nil
+ file_sa_proto_depIdxs = nil
+}
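
For orientation: the init function above registers the raw descriptor and message types with the protobuf runtime, which is what lets ordinary proto.Marshal/proto.Unmarshal calls work on these types. A minimal round-trip sketch, assuming the vendored package is imported under the illustrative alias sapb (values are placeholders):

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/proto"

    	sapb "github.com/letsencrypt/boulder/sa/proto"
    )

    func main() {
    	// CountOrdersRequest and Range are defined in sa.proto below.
    	req := &sapb.CountOrdersRequest{
    		AccountID: 42,
    		Range:     &sapb.Range{Earliest: 0, Latest: 1000},
    	}
    	// Marshal/Unmarshal rely on the message types registered by
    	// file_sa_proto_init via protoimpl.TypeBuilder above.
    	b, err := proto.Marshal(req)
    	if err != nil {
    		panic(err)
    	}
    	var out sapb.CountOrdersRequest
    	if err := proto.Unmarshal(b, &out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.AccountID) // prints 42
    }
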
diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto
new file mode 100644
index 000000000..25d2d6434
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto
@@ -0,0 +1,272 @@
+syntax = "proto3";
+
+package sa;
+option go_package = "github.com/letsencrypt/boulder/sa/proto";
+
+import "core/proto/core.proto";
+import "google/protobuf/empty.proto";
+
+service StorageAuthority {
+ // Getters
+ rpc GetRegistration(RegistrationID) returns (core.Registration) {}
+ rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {}
+ rpc GetSerialMetadata(Serial) returns (SerialMetadata) {}
+ rpc GetCertificate(Serial) returns (core.Certificate) {}
+ rpc GetPrecertificate(Serial) returns (core.Certificate) {}
+ rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {}
+ rpc CountCertificatesByNames(CountCertificatesByNamesRequest) returns (CountByNames) {}
+ rpc CountRegistrationsByIP(CountRegistrationsByIPRequest) returns (Count) {}
+ rpc CountRegistrationsByIPRange(CountRegistrationsByIPRequest) returns (Count) {}
+ rpc CountOrders(CountOrdersRequest) returns (Count) {}
+ rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {}
+ rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {}
+ rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {}
+ rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {}
+ rpc GetAuthorizations2(GetAuthorizationsRequest) returns (Authorizations) {}
+ rpc GetPendingAuthorization2(GetPendingAuthorizationRequest) returns (core.Authorization) {}
+ rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {}
+ rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {}
+ // Return a count of authorizations with status "invalid" that belong to
+ // a given registration ID and expire in the given time range.
+ rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {}
+ rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {}
+ rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {}
+ // Adders
+ rpc NewRegistration(core.Registration) returns (core.Registration) {}
+ rpc UpdateRegistration(core.Registration) returns (google.protobuf.Empty) {}
+ rpc AddCertificate(AddCertificateRequest) returns (AddCertificateResponse) {}
+ rpc AddPrecertificate(AddCertificateRequest) returns (google.protobuf.Empty) {}
+ rpc AddSerial(AddSerialRequest) returns (google.protobuf.Empty) {}
+ rpc DeactivateRegistration(RegistrationID) returns (google.protobuf.Empty) {}
+ rpc NewOrder(NewOrderRequest) returns (core.Order) {}
+ rpc NewOrderAndAuthzs(NewOrderAndAuthzsRequest) returns (core.Order) {}
+ rpc SetOrderProcessing(OrderRequest) returns (google.protobuf.Empty) {}
+ rpc SetOrderError(SetOrderErrorRequest) returns (google.protobuf.Empty) {}
+ rpc FinalizeOrder(FinalizeOrderRequest) returns (google.protobuf.Empty) {}
+ rpc GetOrder(OrderRequest) returns (core.Order) {}
+ rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {}
+ rpc RevokeCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {}
+ rpc UpdateRevokedCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {}
+ rpc NewAuthorizations2(AddPendingAuthorizationsRequest) returns (Authorization2IDs) {}
+ rpc FinalizeAuthorization2(FinalizeAuthorizationRequest) returns (google.protobuf.Empty) {}
+ rpc DeactivateAuthorization2(AuthorizationID2) returns (google.protobuf.Empty) {}
+ rpc AddBlockedKey(AddBlockedKeyRequest) returns (google.protobuf.Empty) {}
+}
+
+message RegistrationID {
+ int64 id = 1;
+}
+
+message JSONWebKey {
+ bytes jwk = 1;
+}
+
+message AuthorizationID {
+ string id = 1;
+}
+
+message GetPendingAuthorizationRequest {
+ int64 registrationID = 1;
+ string identifierType = 2;
+ string identifierValue = 3;
+ // Result must be valid until at least this Unix timestamp (nanoseconds)
+ int64 validUntil = 4;
+}
+
+message GetValidAuthorizationsRequest {
+ int64 registrationID = 1;
+ repeated string domains = 2;
+ int64 now = 3; // Unix timestamp (nanoseconds)
+}
+
+message ValidAuthorizations {
+ message MapElement {
+ string domain = 1;
+ core.Authorization authz = 2;
+ }
+ repeated MapElement valid = 1;
+}
+
+message Serial {
+ string serial = 1;
+}
+
+message SerialMetadata {
+ string serial = 1;
+ int64 registrationID = 2;
+ int64 created = 3; // Unix timestamp (nanoseconds)
+ int64 expires = 4; // Unix timestamp (nanoseconds)
+}
+
+message Range {
+ int64 earliest = 1; // Unix timestamp (nanoseconds)
+ int64 latest = 2; // Unix timestamp (nanoseconds)
+}
+
+message Count {
+ int64 count = 1;
+}
+
+message CountCertificatesByNamesRequest {
+ Range range = 1;
+ repeated string names = 2;
+}
+
+message CountByNames {
+ map<string, int64> counts = 1;
+}
+
+message CountRegistrationsByIPRequest {
+ bytes ip = 1;
+ Range range = 2;
+}
+
+message CountInvalidAuthorizationsRequest {
+ int64 registrationID = 1;
+ string hostname = 2;
+ // Count authorizations that expire in this range.
+ Range range = 3;
+}
+
+message CountOrdersRequest {
+ int64 accountID = 1;
+ Range range = 2;
+}
+
+message CountFQDNSetsRequest {
+ int64 window = 1;
+ repeated string domains = 2;
+}
+
+message FQDNSetExistsRequest {
+ repeated string domains = 1;
+}
+
+message PreviousCertificateExistsRequest {
+ string domain = 1;
+ int64 regID = 2;
+}
+
+message Exists {
+ bool exists = 1;
+}
+
+message AddSerialRequest {
+ int64 regID = 1;
+ string serial = 2;
+ int64 created = 3; // Unix timestamp (nanoseconds)
+ int64 expires = 4; // Unix timestamp (nanoseconds)
+}
+
+message AddCertificateRequest {
+ bytes der = 1;
+ int64 regID = 2;
+ // A signed OCSP response for the certificate contained in "der".
+ // Note: The certificate status in the OCSP response is assumed to be 0 (good).
+ bytes ocsp = 3;
+ // The time at which the certificate was issued. When not present, the SA
+ // defaults to using the current time. The orphan-finder uses this parameter
+ // to add certificates with the correct historical issuance date.
+ int64 issued = 4;
+ int64 issuerID = 5;
+}
+
+message AddCertificateResponse {
+ string digest = 1;
+}
+
+message OrderRequest {
+ int64 id = 1;
+}
+
+message NewOrderRequest {
+ int64 registrationID = 1;
+ int64 expires = 2;
+ repeated string names = 3;
+ repeated int64 v2Authorizations = 4;
+}
+
+message NewOrderAndAuthzsRequest {
+ NewOrderRequest newOrder = 1;
+ repeated core.Authorization newAuthzs = 2;
+}
+
+message SetOrderErrorRequest {
+ int64 id = 1;
+ core.ProblemDetails error = 2;
+}
+
+message GetValidOrderAuthorizationsRequest {
+ int64 id = 1;
+ int64 acctID = 2;
+}
+
+message GetOrderForNamesRequest {
+ int64 acctID = 1;
+ repeated string names = 2;
+}
+
+message FinalizeOrderRequest {
+ int64 id = 1;
+ string certificateSerial = 2;
+}
+
+message GetAuthorizationsRequest {
+ int64 registrationID = 1;
+ repeated string domains = 2;
+ int64 now = 3; // Unix timestamp (nanoseconds)
+}
+
+message Authorizations {
+ message MapElement {
+ string domain = 1;
+ core.Authorization authz = 2;
+ }
+ repeated MapElement authz = 1;
+}
+
+message AddPendingAuthorizationsRequest {
+ repeated core.Authorization authz = 1;
+}
+
+message AuthorizationIDs {
+ repeated string ids = 1;
+}
+
+message AuthorizationID2 {
+ int64 id = 1;
+}
+
+message Authorization2IDs {
+ repeated int64 ids = 1;
+}
+
+message RevokeCertificateRequest {
+ string serial = 1;
+ int64 reason = 2;
+ int64 date = 3; // Unix timestamp (nanoseconds)
+ int64 backdate = 5; // Unix timestamp (nanoseconds)
+ bytes response = 4;
+}
+
+message FinalizeAuthorizationRequest {
+ int64 id = 1;
+ string status = 2;
+ int64 expires = 3; // Unix timestamp (nanoseconds)
+ string attempted = 4;
+ repeated core.ValidationRecord validationRecords = 5;
+ core.ProblemDetails validationError = 6;
+ int64 attemptedAt = 7; // Unix timestamp (nanoseconds)
+}
+
+message AddBlockedKeyRequest {
+ bytes keyHash = 1;
+ int64 added = 2; // Unix timestamp (nanoseconds)
+ string source = 3;
+ string comment = 4;
+ int64 revokedBy = 5;
+}
+
+message KeyBlockedRequest {
+ bytes keyHash = 1;
+}
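
To make the service definition concrete, here is a hedged client-side sketch against the generated API in sa_grpc.pb.go below; the endpoint address, plaintext credentials, and domain name are illustrative only (real deployments would front this service with TLS):

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"

    	sapb "github.com/letsencrypt/boulder/sa/proto"
    )

    func main() {
    	conn, err := grpc.Dial("localhost:9095",
    		grpc.WithTransportCredentials(insecure.NewCredentials()))
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()

    	client := sapb.NewStorageAuthorityClient(conn)
    	resp, err := client.FQDNSetExists(context.Background(),
    		&sapb.FQDNSetExistsRequest{Domains: []string{"example.com"}})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(resp.Exists)
    }
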
diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go b/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go
new file mode 100644
index 000000000..3aae5354b
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go
@@ -0,0 +1,1515 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+
+package proto
+
+import (
+ context "context"
+ proto "github.com/letsencrypt/boulder/core/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// StorageAuthorityClient is the client API for StorageAuthority service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type StorageAuthorityClient interface {
+ // Getters
+ GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error)
+ GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error)
+ GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error)
+ GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
+ GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
+ GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error)
+ CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error)
+ CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
+ CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
+ CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error)
+ CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error)
+ FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error)
+ PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error)
+ GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error)
+ GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
+ GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error)
+ CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error)
+ GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
+ // Return a count of authorizations with status "invalid" that belong to
+ // a given registration ID and expire in the given time range.
+ CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error)
+ GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
+ KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error)
+ // Adders
+ NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error)
+ UpdateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error)
+ AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ DeactivateRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
+ NewOrderAndAuthzs(ctx context.Context, in *NewOrderAndAuthzsRequest, opts ...grpc.CallOption) (*proto.Order, error)
+ SetOrderProcessing(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ SetOrderError(ctx context.Context, in *SetOrderErrorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
+ GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error)
+ RevokeCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ UpdateRevokedCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ NewAuthorizations2(ctx context.Context, in *AddPendingAuthorizationsRequest, opts ...grpc.CallOption) (*Authorization2IDs, error)
+ FinalizeAuthorization2(ctx context.Context, in *FinalizeAuthorizationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ DeactivateAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ AddBlockedKey(ctx context.Context, in *AddBlockedKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+}
+
+type storageAuthorityClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewStorageAuthorityClient(cc grpc.ClientConnInterface) StorageAuthorityClient {
+ return &storageAuthorityClient{cc}
+}
+
+func (c *storageAuthorityClient) GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) {
+ out := new(proto.Registration)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetRegistration", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) {
+ out := new(proto.Registration)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetRegistrationByKey", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) {
+ out := new(SerialMetadata)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetSerialMetadata", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) {
+ out := new(proto.Certificate)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetCertificate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) {
+ out := new(proto.Certificate)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetPrecertificate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) {
+ out := new(proto.CertificateStatus)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetCertificateStatus", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error) {
+ out := new(CountByNames)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountCertificatesByNames", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) {
+ out := new(Count)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountRegistrationsByIP", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) {
+ out := new(Count)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountRegistrationsByIPRange", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error) {
+ out := new(Count)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountOrders", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) {
+ out := new(Count)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountFQDNSets", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) {
+ out := new(Exists)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FQDNSetExists", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) {
+ out := new(Exists)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/PreviousCertificateExists", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) {
+ out := new(proto.Authorization)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetAuthorization2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
+ out := new(Authorizations)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetAuthorizations2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) {
+ out := new(proto.Authorization)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetPendingAuthorization2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) {
+ out := new(Count)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountPendingAuthorizations2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
+ out := new(Authorizations)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetValidOrderAuthorizations2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) {
+ out := new(Count)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountInvalidAuthorizations2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
+ out := new(Authorizations)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetValidAuthorizations2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) {
+ out := new(Exists)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/KeyBlocked", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) {
+ out := new(proto.Registration)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewRegistration", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) UpdateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/UpdateRegistration", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error) {
+ out := new(AddCertificateResponse)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddCertificate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddPrecertificate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddSerial", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) DeactivateRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/DeactivateRegistration", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) {
+ out := new(proto.Order)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewOrder", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) NewOrderAndAuthzs(ctx context.Context, in *NewOrderAndAuthzsRequest, opts ...grpc.CallOption) (*proto.Order, error) {
+ out := new(proto.Order)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewOrderAndAuthzs", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) SetOrderProcessing(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/SetOrderProcessing", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) SetOrderError(ctx context.Context, in *SetOrderErrorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/SetOrderError", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FinalizeOrder", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) {
+ out := new(proto.Order)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetOrder", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) {
+ out := new(proto.Order)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetOrderForNames", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) RevokeCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/RevokeCertificate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) UpdateRevokedCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/UpdateRevokedCertificate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) NewAuthorizations2(ctx context.Context, in *AddPendingAuthorizationsRequest, opts ...grpc.CallOption) (*Authorization2IDs, error) {
+ out := new(Authorization2IDs)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewAuthorizations2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) FinalizeAuthorization2(ctx context.Context, in *FinalizeAuthorizationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FinalizeAuthorization2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) DeactivateAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/DeactivateAuthorization2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) AddBlockedKey(ctx context.Context, in *AddBlockedKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddBlockedKey", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// StorageAuthorityServer is the server API for StorageAuthority service.
+// All implementations must embed UnimplementedStorageAuthorityServer
+// for forward compatibility
+type StorageAuthorityServer interface {
+ // Getters
+ GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error)
+ GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error)
+ GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error)
+ GetCertificate(context.Context, *Serial) (*proto.Certificate, error)
+ GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error)
+ GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error)
+ CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error)
+ CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error)
+ CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error)
+ CountOrders(context.Context, *CountOrdersRequest) (*Count, error)
+ CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error)
+ FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error)
+ PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error)
+ GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error)
+ GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error)
+ GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error)
+ CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error)
+ GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error)
+ // Return a count of authorizations with status "invalid" that belong to
+ // a given registration ID and expire in the given time range.
+ CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error)
+ GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error)
+ KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error)
+ // Adders
+ NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error)
+ UpdateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error)
+ AddCertificate(context.Context, *AddCertificateRequest) (*AddCertificateResponse, error)
+ AddPrecertificate(context.Context, *AddCertificateRequest) (*emptypb.Empty, error)
+ AddSerial(context.Context, *AddSerialRequest) (*emptypb.Empty, error)
+ DeactivateRegistration(context.Context, *RegistrationID) (*emptypb.Empty, error)
+ NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error)
+ NewOrderAndAuthzs(context.Context, *NewOrderAndAuthzsRequest) (*proto.Order, error)
+ SetOrderProcessing(context.Context, *OrderRequest) (*emptypb.Empty, error)
+ SetOrderError(context.Context, *SetOrderErrorRequest) (*emptypb.Empty, error)
+ FinalizeOrder(context.Context, *FinalizeOrderRequest) (*emptypb.Empty, error)
+ GetOrder(context.Context, *OrderRequest) (*proto.Order, error)
+ GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error)
+ RevokeCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error)
+ UpdateRevokedCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error)
+ NewAuthorizations2(context.Context, *AddPendingAuthorizationsRequest) (*Authorization2IDs, error)
+ FinalizeAuthorization2(context.Context, *FinalizeAuthorizationRequest) (*emptypb.Empty, error)
+ DeactivateAuthorization2(context.Context, *AuthorizationID2) (*emptypb.Empty, error)
+ AddBlockedKey(context.Context, *AddBlockedKeyRequest) (*emptypb.Empty, error)
+ mustEmbedUnimplementedStorageAuthorityServer()
+}
+
+// UnimplementedStorageAuthorityServer must be embedded to have forward compatible implementations.
+type UnimplementedStorageAuthorityServer struct {
+}
+
+func (UnimplementedStorageAuthorityServer) GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetRegistration not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetRegistrationByKey not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetSerialMetadata not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetCertificate(context.Context, *Serial) (*proto.Certificate, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetCertificate not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetPrecertificate not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetCertificateStatus not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountCertificatesByNames not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIP not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIPRange not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountOrders(context.Context, *CountOrdersRequest) (*Count, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountOrders not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountFQDNSets not implemented")
+}
+func (UnimplementedStorageAuthorityServer) FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FQDNSetExists not implemented")
+}
+func (UnimplementedStorageAuthorityServer) PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PreviousCertificateExists not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetAuthorization2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetAuthorizations2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetPendingAuthorization2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountPendingAuthorizations2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetValidOrderAuthorizations2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountInvalidAuthorizations2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetValidAuthorizations2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method KeyBlocked not implemented")
+}
+func (UnimplementedStorageAuthorityServer) NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NewRegistration not implemented")
+}
+func (UnimplementedStorageAuthorityServer) UpdateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateRegistration not implemented")
+}
+func (UnimplementedStorageAuthorityServer) AddCertificate(context.Context, *AddCertificateRequest) (*AddCertificateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddCertificate not implemented")
+}
+func (UnimplementedStorageAuthorityServer) AddPrecertificate(context.Context, *AddCertificateRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddPrecertificate not implemented")
+}
+func (UnimplementedStorageAuthorityServer) AddSerial(context.Context, *AddSerialRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddSerial not implemented")
+}
+func (UnimplementedStorageAuthorityServer) DeactivateRegistration(context.Context, *RegistrationID) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeactivateRegistration not implemented")
+}
+func (UnimplementedStorageAuthorityServer) NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NewOrder not implemented")
+}
+func (UnimplementedStorageAuthorityServer) NewOrderAndAuthzs(context.Context, *NewOrderAndAuthzsRequest) (*proto.Order, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NewOrderAndAuthzs not implemented")
+}
+func (UnimplementedStorageAuthorityServer) SetOrderProcessing(context.Context, *OrderRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SetOrderProcessing not implemented")
+}
+func (UnimplementedStorageAuthorityServer) SetOrderError(context.Context, *SetOrderErrorRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SetOrderError not implemented")
+}
+func (UnimplementedStorageAuthorityServer) FinalizeOrder(context.Context, *FinalizeOrderRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FinalizeOrder not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetOrder(context.Context, *OrderRequest) (*proto.Order, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetOrder not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetOrderForNames not implemented")
+}
+func (UnimplementedStorageAuthorityServer) RevokeCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RevokeCertificate not implemented")
+}
+func (UnimplementedStorageAuthorityServer) UpdateRevokedCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateRevokedCertificate not implemented")
+}
+func (UnimplementedStorageAuthorityServer) NewAuthorizations2(context.Context, *AddPendingAuthorizationsRequest) (*Authorization2IDs, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NewAuthorizations2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) FinalizeAuthorization2(context.Context, *FinalizeAuthorizationRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FinalizeAuthorization2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) DeactivateAuthorization2(context.Context, *AuthorizationID2) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeactivateAuthorization2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) AddBlockedKey(context.Context, *AddBlockedKeyRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddBlockedKey not implemented")
+}
+func (UnimplementedStorageAuthorityServer) mustEmbedUnimplementedStorageAuthorityServer() {}
+
+// UnsafeStorageAuthorityServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to StorageAuthorityServer will
+// result in compilation errors.
+type UnsafeStorageAuthorityServer interface {
+ mustEmbedUnimplementedStorageAuthorityServer()
+}
+
+func RegisterStorageAuthorityServer(s grpc.ServiceRegistrar, srv StorageAuthorityServer) {
+ s.RegisterService(&StorageAuthority_ServiceDesc, srv)
+}
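
RegisterStorageAuthorityServer pairs with the mustEmbed requirement above: an implementation embeds UnimplementedStorageAuthorityServer, overrides the methods it supports, and lets the rest fall through to the Unimplemented stubs. A sketch under those assumptions (the type name and address are illustrative):

    package main

    import (
    	"context"
    	"log"
    	"net"

    	"google.golang.org/grpc"

    	sapb "github.com/letsencrypt/boulder/sa/proto"
    )

    // stubSA overrides only KeyBlocked; every other RPC falls through to the
    // embedded UnimplementedStorageAuthorityServer and returns Unimplemented.
    type stubSA struct {
    	sapb.UnimplementedStorageAuthorityServer
    }

    func (stubSA) KeyBlocked(ctx context.Context, in *sapb.KeyBlockedRequest) (*sapb.Exists, error) {
    	return &sapb.Exists{Exists: false}, nil
    }

    func main() {
    	lis, err := net.Listen("tcp", "localhost:9095")
    	if err != nil {
    		log.Fatal(err)
    	}
    	s := grpc.NewServer()
    	sapb.RegisterStorageAuthorityServer(s, stubSA{})
    	log.Fatal(s.Serve(lis))
    }
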
+
+func _StorageAuthority_GetRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RegistrationID)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetRegistration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetRegistration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetRegistration(ctx, req.(*RegistrationID))
+ }
+ return interceptor(ctx, in, info, handler)
+}
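
Each generated handler, like _StorageAuthority_GetRegistration_Handler above, threads an optional grpc.UnaryServerInterceptor: with no interceptor installed, the method is invoked directly; otherwise the interceptor receives the decoded request plus a closure over the real method and decides how to invoke it. A minimal interceptor sketch (the logging behavior is illustrative):

    import (
    	"context"
    	"log"

    	"google.golang.org/grpc"
    )

    // logCalls logs the full method name, then delegates to the handler
    // closure built by the generated code above.
    func logCalls(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,
    	handler grpc.UnaryHandler) (interface{}, error) {
    	log.Printf("gRPC call: %s", info.FullMethod)
    	return handler(ctx, req)
    }

    // Installed once at server construction:
    //     s := grpc.NewServer(grpc.UnaryInterceptor(logCalls))
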
+
+func _StorageAuthority_GetRegistrationByKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(JSONWebKey)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetRegistrationByKey(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetRegistrationByKey",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetRegistrationByKey(ctx, req.(*JSONWebKey))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetSerialMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Serial)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetSerialMetadata(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetSerialMetadata",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetSerialMetadata(ctx, req.(*Serial))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Serial)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetCertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetCertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetCertificate(ctx, req.(*Serial))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Serial)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetPrecertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetPrecertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetPrecertificate(ctx, req.(*Serial))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Serial)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetCertificateStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetCertificateStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetCertificateStatus(ctx, req.(*Serial))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountCertificatesByNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountCertificatesByNamesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountCertificatesByNames(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountCertificatesByNames",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountCertificatesByNames(ctx, req.(*CountCertificatesByNamesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountRegistrationsByIP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountRegistrationsByIPRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountRegistrationsByIP(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountRegistrationsByIP",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountRegistrationsByIP(ctx, req.(*CountRegistrationsByIPRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountRegistrationsByIPRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountRegistrationsByIPRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountRegistrationsByIPRange(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountRegistrationsByIPRange",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountRegistrationsByIPRange(ctx, req.(*CountRegistrationsByIPRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountOrders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountOrdersRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountOrders(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountOrders",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountOrders(ctx, req.(*CountOrdersRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountFQDNSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountFQDNSetsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountFQDNSets(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountFQDNSets",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountFQDNSets(ctx, req.(*CountFQDNSetsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_FQDNSetExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FQDNSetExistsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).FQDNSetExists(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/FQDNSetExists",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).FQDNSetExists(ctx, req.(*FQDNSetExistsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_PreviousCertificateExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PreviousCertificateExistsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).PreviousCertificateExists(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/PreviousCertificateExists",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).PreviousCertificateExists(ctx, req.(*PreviousCertificateExistsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthorizationID2)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetAuthorization2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetAuthorization2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetAuthorization2(ctx, req.(*AuthorizationID2))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetAuthorizationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetAuthorizations2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetAuthorizations2(ctx, req.(*GetAuthorizationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetPendingAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetPendingAuthorizationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetPendingAuthorization2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetPendingAuthorization2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetPendingAuthorization2(ctx, req.(*GetPendingAuthorizationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountPendingAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RegistrationID)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountPendingAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountPendingAuthorizations2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountPendingAuthorizations2(ctx, req.(*RegistrationID))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetValidOrderAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetValidOrderAuthorizationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetValidOrderAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetValidOrderAuthorizations2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetValidOrderAuthorizations2(ctx, req.(*GetValidOrderAuthorizationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountInvalidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountInvalidAuthorizationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountInvalidAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountInvalidAuthorizations2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountInvalidAuthorizations2(ctx, req.(*CountInvalidAuthorizationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetValidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetValidAuthorizationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetValidAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetValidAuthorizations2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetValidAuthorizations2(ctx, req.(*GetValidAuthorizationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_KeyBlocked_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(KeyBlockedRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).KeyBlocked(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/KeyBlocked",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).KeyBlocked(ctx, req.(*KeyBlockedRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_NewRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(proto.Registration)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).NewRegistration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/NewRegistration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).NewRegistration(ctx, req.(*proto.Registration))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_UpdateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(proto.Registration)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).UpdateRegistration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/UpdateRegistration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).UpdateRegistration(ctx, req.(*proto.Registration))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_AddCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddCertificateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).AddCertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/AddCertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).AddCertificate(ctx, req.(*AddCertificateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_AddPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddCertificateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).AddPrecertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/AddPrecertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).AddPrecertificate(ctx, req.(*AddCertificateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_AddSerial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddSerialRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).AddSerial(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/AddSerial",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).AddSerial(ctx, req.(*AddSerialRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_DeactivateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RegistrationID)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).DeactivateRegistration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/DeactivateRegistration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).DeactivateRegistration(ctx, req.(*RegistrationID))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_NewOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(NewOrderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).NewOrder(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/NewOrder",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).NewOrder(ctx, req.(*NewOrderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_NewOrderAndAuthzs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(NewOrderAndAuthzsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).NewOrderAndAuthzs(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/NewOrderAndAuthzs",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).NewOrderAndAuthzs(ctx, req.(*NewOrderAndAuthzsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_SetOrderProcessing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(OrderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).SetOrderProcessing(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/SetOrderProcessing",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).SetOrderProcessing(ctx, req.(*OrderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_SetOrderError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetOrderErrorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).SetOrderError(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/SetOrderError",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).SetOrderError(ctx, req.(*SetOrderErrorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_FinalizeOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FinalizeOrderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).FinalizeOrder(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/FinalizeOrder",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).FinalizeOrder(ctx, req.(*FinalizeOrderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(OrderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetOrder(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetOrder",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetOrder(ctx, req.(*OrderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetOrderForNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetOrderForNamesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetOrderForNames(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetOrderForNames",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetOrderForNames(ctx, req.(*GetOrderForNamesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RevokeCertificateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).RevokeCertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/RevokeCertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).RevokeCertificate(ctx, req.(*RevokeCertificateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_UpdateRevokedCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RevokeCertificateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).UpdateRevokedCertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/UpdateRevokedCertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).UpdateRevokedCertificate(ctx, req.(*RevokeCertificateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_NewAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddPendingAuthorizationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).NewAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/NewAuthorizations2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).NewAuthorizations2(ctx, req.(*AddPendingAuthorizationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_FinalizeAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FinalizeAuthorizationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).FinalizeAuthorization2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/FinalizeAuthorization2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).FinalizeAuthorization2(ctx, req.(*FinalizeAuthorizationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_DeactivateAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthorizationID2)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).DeactivateAuthorization2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/DeactivateAuthorization2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).DeactivateAuthorization2(ctx, req.(*AuthorizationID2))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_AddBlockedKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddBlockedKeyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).AddBlockedKey(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/AddBlockedKey",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).AddBlockedKey(ctx, req.(*AddBlockedKeyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// StorageAuthority_ServiceDesc is the grpc.ServiceDesc for StorageAuthority service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var StorageAuthority_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "sa.StorageAuthority",
+ HandlerType: (*StorageAuthorityServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "GetRegistration",
+ Handler: _StorageAuthority_GetRegistration_Handler,
+ },
+ {
+ MethodName: "GetRegistrationByKey",
+ Handler: _StorageAuthority_GetRegistrationByKey_Handler,
+ },
+ {
+ MethodName: "GetSerialMetadata",
+ Handler: _StorageAuthority_GetSerialMetadata_Handler,
+ },
+ {
+ MethodName: "GetCertificate",
+ Handler: _StorageAuthority_GetCertificate_Handler,
+ },
+ {
+ MethodName: "GetPrecertificate",
+ Handler: _StorageAuthority_GetPrecertificate_Handler,
+ },
+ {
+ MethodName: "GetCertificateStatus",
+ Handler: _StorageAuthority_GetCertificateStatus_Handler,
+ },
+ {
+ MethodName: "CountCertificatesByNames",
+ Handler: _StorageAuthority_CountCertificatesByNames_Handler,
+ },
+ {
+ MethodName: "CountRegistrationsByIP",
+ Handler: _StorageAuthority_CountRegistrationsByIP_Handler,
+ },
+ {
+ MethodName: "CountRegistrationsByIPRange",
+ Handler: _StorageAuthority_CountRegistrationsByIPRange_Handler,
+ },
+ {
+ MethodName: "CountOrders",
+ Handler: _StorageAuthority_CountOrders_Handler,
+ },
+ {
+ MethodName: "CountFQDNSets",
+ Handler: _StorageAuthority_CountFQDNSets_Handler,
+ },
+ {
+ MethodName: "FQDNSetExists",
+ Handler: _StorageAuthority_FQDNSetExists_Handler,
+ },
+ {
+ MethodName: "PreviousCertificateExists",
+ Handler: _StorageAuthority_PreviousCertificateExists_Handler,
+ },
+ {
+ MethodName: "GetAuthorization2",
+ Handler: _StorageAuthority_GetAuthorization2_Handler,
+ },
+ {
+ MethodName: "GetAuthorizations2",
+ Handler: _StorageAuthority_GetAuthorizations2_Handler,
+ },
+ {
+ MethodName: "GetPendingAuthorization2",
+ Handler: _StorageAuthority_GetPendingAuthorization2_Handler,
+ },
+ {
+ MethodName: "CountPendingAuthorizations2",
+ Handler: _StorageAuthority_CountPendingAuthorizations2_Handler,
+ },
+ {
+ MethodName: "GetValidOrderAuthorizations2",
+ Handler: _StorageAuthority_GetValidOrderAuthorizations2_Handler,
+ },
+ {
+ MethodName: "CountInvalidAuthorizations2",
+ Handler: _StorageAuthority_CountInvalidAuthorizations2_Handler,
+ },
+ {
+ MethodName: "GetValidAuthorizations2",
+ Handler: _StorageAuthority_GetValidAuthorizations2_Handler,
+ },
+ {
+ MethodName: "KeyBlocked",
+ Handler: _StorageAuthority_KeyBlocked_Handler,
+ },
+ {
+ MethodName: "NewRegistration",
+ Handler: _StorageAuthority_NewRegistration_Handler,
+ },
+ {
+ MethodName: "UpdateRegistration",
+ Handler: _StorageAuthority_UpdateRegistration_Handler,
+ },
+ {
+ MethodName: "AddCertificate",
+ Handler: _StorageAuthority_AddCertificate_Handler,
+ },
+ {
+ MethodName: "AddPrecertificate",
+ Handler: _StorageAuthority_AddPrecertificate_Handler,
+ },
+ {
+ MethodName: "AddSerial",
+ Handler: _StorageAuthority_AddSerial_Handler,
+ },
+ {
+ MethodName: "DeactivateRegistration",
+ Handler: _StorageAuthority_DeactivateRegistration_Handler,
+ },
+ {
+ MethodName: "NewOrder",
+ Handler: _StorageAuthority_NewOrder_Handler,
+ },
+ {
+ MethodName: "NewOrderAndAuthzs",
+ Handler: _StorageAuthority_NewOrderAndAuthzs_Handler,
+ },
+ {
+ MethodName: "SetOrderProcessing",
+ Handler: _StorageAuthority_SetOrderProcessing_Handler,
+ },
+ {
+ MethodName: "SetOrderError",
+ Handler: _StorageAuthority_SetOrderError_Handler,
+ },
+ {
+ MethodName: "FinalizeOrder",
+ Handler: _StorageAuthority_FinalizeOrder_Handler,
+ },
+ {
+ MethodName: "GetOrder",
+ Handler: _StorageAuthority_GetOrder_Handler,
+ },
+ {
+ MethodName: "GetOrderForNames",
+ Handler: _StorageAuthority_GetOrderForNames_Handler,
+ },
+ {
+ MethodName: "RevokeCertificate",
+ Handler: _StorageAuthority_RevokeCertificate_Handler,
+ },
+ {
+ MethodName: "UpdateRevokedCertificate",
+ Handler: _StorageAuthority_UpdateRevokedCertificate_Handler,
+ },
+ {
+ MethodName: "NewAuthorizations2",
+ Handler: _StorageAuthority_NewAuthorizations2_Handler,
+ },
+ {
+ MethodName: "FinalizeAuthorization2",
+ Handler: _StorageAuthority_FinalizeAuthorization2_Handler,
+ },
+ {
+ MethodName: "DeactivateAuthorization2",
+ Handler: _StorageAuthority_DeactivateAuthorization2_Handler,
+ },
+ {
+ MethodName: "AddBlockedKey",
+ Handler: _StorageAuthority_AddBlockedKey_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "sa.proto",
+}
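
For orientation, every generated handler above repeats one dispatch shape: decode the request, then either call the service method directly or hand a closure to the interceptor so middleware can wrap the call. A standalone sketch of that pattern, with all names invented here for illustration (this code is not part of the patch):

package main

import (
	"context"
	"fmt"
)

type unaryInterceptor func(ctx context.Context, req interface{},
	handler func(ctx context.Context, req interface{}) (interface{}, error)) (interface{}, error)

// dispatch mirrors the generated handlers: no interceptor means a direct
// call; otherwise the interceptor runs around the wrapped method.
func dispatch(ctx context.Context, req interface{}, interceptor unaryInterceptor,
	method func(ctx context.Context, req interface{}) (interface{}, error)) (interface{}, error) {
	if interceptor == nil {
		return method(ctx, req)
	}
	return interceptor(ctx, req, method)
}

func main() {
	logging := func(ctx context.Context, req interface{},
		handler func(ctx context.Context, req interface{}) (interface{}, error)) (interface{}, error) {
		fmt.Println("before call")
		resp, err := handler(ctx, req)
		fmt.Println("after call")
		return resp, err
	}
	resp, _ := dispatch(context.Background(), "ping", logging,
		func(ctx context.Context, req interface{}) (interface{}, error) {
			return "pong", nil
		})
	fmt.Println(resp) // pong
}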
diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go b/vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go
new file mode 100644
index 000000000..fcf52279d
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go
@@ -0,0 +1,46 @@
+// Copied from the auto-generated sa_grpc.pb.go
+
+package proto
+
+import (
+ context "context"
+
+ proto "github.com/letsencrypt/boulder/core/proto"
+ grpc "google.golang.org/grpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// StorageAuthorityGetterClient is a read-only subset of the sapb.StorageAuthorityClient interface
+type StorageAuthorityGetterClient interface {
+ GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error)
+ GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error)
+ GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
+ GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
+ GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error)
+ CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error)
+ CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
+ CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
+ CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error)
+ CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error)
+ FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error)
+ PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error)
+ GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error)
+ GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
+ GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error)
+ CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error)
+ GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
+ CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error)
+ GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
+ KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error)
+ GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
+ GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error)
+}
+
+// StorageAuthorityCertificateClient is a subset of the sapb.StorageAuthorityClient interface that only reads and writes certificates
+type StorageAuthorityCertificateClient interface {
+ AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
+ AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error)
+ GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
+}
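
subsets.go narrows the full StorageAuthority client down to a read-only view and a certificate-only view, so callers compiled against the narrow interface cannot reach mutating methods. A minimal, self-contained sketch of that interface-subset technique, with invented types (not from this patch):

package main

import "fmt"

type Store struct{ data map[string]string }

func (s *Store) Get(k string) string { return s.data[k] }
func (s *Store) Put(k, v string)     { s.data[k] = v }

// ReadOnlyStore is the subset interface: only the getter is visible.
type ReadOnlyStore interface {
	Get(k string) string
}

func audit(s ReadOnlyStore) {
	fmt.Println(s.Get("serial")) // s.Put would not compile here
}

func main() {
	s := &Store{data: map[string]string{"serial": "abc123"}}
	audit(s) // *Store satisfies ReadOnlyStore implicitly
}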
diff --git a/vendor/github.com/openshift/imagebuilder/.travis.yml b/vendor/github.com/openshift/imagebuilder/.travis.yml
index ff6afee5a..42713c92c 100644
--- a/vendor/github.com/openshift/imagebuilder/.travis.yml
+++ b/vendor/github.com/openshift/imagebuilder/.travis.yml
@@ -1,16 +1,23 @@
language: go
+services:
+ - docker
+
go:
- - "1.16"
- "1.17"
+ - "1.18"
-install:
+before_install:
+ - sudo apt-get update -q -y
+ - sudo apt-get install -q -y golang
+ - docker pull busybox
+ - docker pull centos:7
+ - chmod -R go-w ./dockerclient/testdata
script:
- make build
- make test
+ - travis_wait 30 make test-conformance
notifications:
irc: "chat.freenode.net#openshift-dev"
-
-sudo: false
diff --git a/vendor/github.com/openshift/imagebuilder/Makefile b/vendor/github.com/openshift/imagebuilder/Makefile
index 1cbb26eed..1a0cf0df2 100644
--- a/vendor/github.com/openshift/imagebuilder/Makefile
+++ b/vendor/github.com/openshift/imagebuilder/Makefile
@@ -3,9 +3,9 @@ build:
.PHONY: build
test:
- go test $(go list ./... | grep -v /vendor/)
+ go test ./...
.PHONY: test
test-conformance:
- go test -v -tags conformance -timeout 10m ./dockerclient
+ go test -v -tags conformance -timeout 30m ./dockerclient
.PHONY: test-conformance
diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go
index 71dc41ea5..db0c2f11b 100644
--- a/vendor/github.com/openshift/imagebuilder/builder.go
+++ b/vendor/github.com/openshift/imagebuilder/builder.go
@@ -215,6 +215,17 @@ type Stage struct {
func NewStages(node *parser.Node, b *Builder) (Stages, error) {
var stages Stages
+ var allDeclaredArgs []string
+ for _, root := range SplitBy(node, command.Arg) {
+ argNode := root.Children[0]
+ if argNode.Value == command.Arg {
+ // extract declared variable
+ s := strings.SplitN(argNode.Original, " ", 2)
+ if len(s) == 2 && (strings.ToLower(s[0]) == command.Arg) {
+ allDeclaredArgs = append(allDeclaredArgs, s[1])
+ }
+ }
+ }
if err := b.extractHeadingArgsFromNode(node); err != nil {
return stages, err
}
@@ -226,7 +237,7 @@ func NewStages(node *parser.Node, b *Builder) (Stages, error) {
stages = append(stages, Stage{
Position: i,
Name: name,
- Builder: b.builderForStage(),
+ Builder: b.builderForStage(allDeclaredArgs),
Node: root,
})
}
@@ -290,8 +301,8 @@ func extractNameFromNode(node *parser.Node) (string, bool) {
return n.Next.Value, true
}
-func (b *Builder) builderForStage() *Builder {
- stageBuilder := NewBuilder(b.UserArgs)
+func (b *Builder) builderForStage(globalArgsList []string) *Builder {
+ stageBuilder := newBuilderWithGlobalAllowedArgs(b.UserArgs, globalArgsList)
for k, v := range b.HeadingArgs {
stageBuilder.HeadingArgs[k] = v
}
@@ -307,6 +318,12 @@ type Builder struct {
UserArgs map[string]string
CmdSet bool
Author string
+ // Certain instructions, such as `FROM`, may need to use an
+ // `ARG` declared earlier or outside this stage, so when
+ // processing an instruction like `FROM ${SOME_ARG}` we
+ // verify whether the argument is declared anywhere in the
+ // Containerfile.
+ GlobalAllowedArgs []string
AllowedArgs map[string]bool
Volumes VolumeSet
@@ -323,6 +340,10 @@ type Builder struct {
}
func NewBuilder(args map[string]string) *Builder {
+ return newBuilderWithGlobalAllowedArgs(args, []string{})
+}
+
+func newBuilderWithGlobalAllowedArgs(args map[string]string, globalallowedargs []string) *Builder {
allowed := make(map[string]bool)
for k, v := range builtinAllowedBuildArgs {
allowed[k] = v
@@ -334,10 +355,11 @@ func NewBuilder(args map[string]string) *Builder {
initialArgs[k] = v
}
return &Builder{
- Args: initialArgs,
- UserArgs: userArgs,
- HeadingArgs: make(map[string]string),
- AllowedArgs: allowed,
+ Args: initialArgs,
+ UserArgs: userArgs,
+ HeadingArgs: make(map[string]string),
+ AllowedArgs: allowed,
+ GlobalAllowedArgs: globalallowedargs,
}
}
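
The builder.go change pre-scans the whole Containerfile for ARG declarations before any stage runs, so a later `FROM ${SOME_ARG}` can resolve an ARG declared in another stage. A hedged, standalone sketch of that pre-scan step (helper name and inputs are illustrative only):

package main

import (
	"fmt"
	"strings"
)

// collectDeclaredArgs walks every line and records the payload of each
// `ARG NAME[=default]` instruction, mirroring the SplitN logic in the patch.
func collectDeclaredArgs(lines []string) []string {
	var declared []string
	for _, line := range lines {
		fields := strings.SplitN(strings.TrimSpace(line), " ", 2)
		if len(fields) == 2 && strings.EqualFold(fields[0], "arg") {
			declared = append(declared, fields[1])
		}
	}
	return declared
}

func main() {
	containerfile := []string{
		"ARG BASE=registry.example.com/busybox",
		"FROM ${BASE}",
		"ARG OTHER",
	}
	fmt.Println(collectDeclaredArgs(containerfile)) // [BASE=registry.example.com/busybox OTHER]
}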
diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go
index c437e4c23..2afbbf1b2 100644
--- a/vendor/github.com/openshift/imagebuilder/dispatchers.go
+++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go
@@ -142,7 +142,13 @@ func add(b *Builder, args []string, attributes map[string]bool, flagArgs []strin
var chmod string
last := len(args) - 1
dest := makeAbsolute(args[last], b.RunConfig.WorkingDir)
- userArgs := mergeEnv(envMapAsSlice(b.Args), b.Env)
+ filteredUserArgs := make(map[string]string)
+ for k, v := range b.Args {
+ if _, ok := b.AllowedArgs[k]; ok {
+ filteredUserArgs[k] = v
+ }
+ }
+ userArgs := mergeEnv(envMapAsSlice(filteredUserArgs), b.Env)
for _, a := range flagArgs {
arg, err := ProcessWord(a, userArgs)
if err != nil {
@@ -223,8 +229,20 @@ func from(b *Builder, args []string, attributes map[string]bool, flagArgs []stri
for n, v := range b.HeadingArgs {
argStrs = append(argStrs, n+"="+v)
}
+ defaultArgs := envMapAsSlice(builtinBuildArgs)
+ filteredUserArgs := make(map[string]string)
+ for k, v := range b.UserArgs {
+ for _, a := range b.GlobalAllowedArgs {
+ if a == k {
+ filteredUserArgs[k] = v
+ }
+ }
+ }
+ userArgs := mergeEnv(envMapAsSlice(filteredUserArgs), b.Env)
+ userArgs = mergeEnv(defaultArgs, userArgs)
+ nameArgs := mergeEnv(argStrs, userArgs)
var err error
- if name, err = ProcessWord(name, argStrs); err != nil {
+ if name, err = ProcessWord(name, nameArgs); err != nil {
return err
}
@@ -234,9 +252,6 @@ func from(b *Builder, args []string, attributes map[string]bool, flagArgs []stri
return fmt.Errorf("Windows does not support FROM scratch")
}
}
- defaultArgs := envMapAsSlice(builtinBuildArgs)
- userArgs := mergeEnv(envMapAsSlice(b.Args), b.Env)
- userArgs = mergeEnv(defaultArgs, userArgs)
for _, a := range flagArgs {
arg, err := ProcessWord(a, userArgs)
if err != nil {
@@ -323,7 +338,13 @@ func run(b *Builder, args []string, attributes map[string]bool, flagArgs []strin
args = handleJSONArgs(args, attributes)
var mounts []string
- userArgs := mergeEnv(envMapAsSlice(b.Args), b.Env)
+ filteredUserArgs := make(map[string]string)
+ for k, v := range b.Args {
+ if _, ok := b.AllowedArgs[k]; ok {
+ filteredUserArgs[k] = v
+ }
+ }
+ userArgs := mergeEnv(envMapAsSlice(filteredUserArgs), b.Env)
for _, a := range flagArgs {
arg, err := ProcessWord(a, userArgs)
if err != nil {
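
The add() and run() changes above share one filtering step: before build args are merged with the environment for word expansion, any arg not declared via ARG (tracked in AllowedArgs) is dropped. A self-contained sketch of that step, under the assumption that allowed holds the declared names:

package main

import "fmt"

// filterArgs keeps only the build args whose names appear in allowed,
// mirroring the loop the patch adds before mergeEnv.
func filterArgs(args map[string]string, allowed map[string]bool) map[string]string {
	filtered := make(map[string]string, len(args))
	for k, v := range args {
		if allowed[k] {
			filtered[k] = v
		}
	}
	return filtered
}

func main() {
	args := map[string]string{"VERSION": "1.2", "SECRET": "hunter2"}
	allowed := map[string]bool{"VERSION": true} // SECRET was never declared with ARG
	fmt.Println(filterArgs(args, allowed))      // map[VERSION:1.2]
}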
diff --git a/vendor/github.com/proglottis/gpgme/.appveyor.yml b/vendor/github.com/proglottis/gpgme/.appveyor.yml
deleted file mode 100644
index 2fdc09ab5..000000000
--- a/vendor/github.com/proglottis/gpgme/.appveyor.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-version: 0.{build}
-platform: x86
-branches:
- only:
- - master
-
-clone_folder: c:\gopath\src\github.com\proglottis\gpgme
-
-environment:
- GOPATH: c:\gopath
- GOROOT: C:\go-x86
- CGO_LDFLAGS: -LC:\gpg\lib
- CGO_CFLAGS: -IC:\gpg\include
- GPG_DIR: C:\gpg
-
-install:
- - nuget install 7ZipCLI -ExcludeVersion
- - set PATH=%appveyor_build_folder%\7ZipCLI\tools;%PATH%
- - appveyor DownloadFile https://www.gnupg.org/ftp/gcrypt/binary/gnupg-w32-2.1.20_20170403.exe -FileName gnupg-w32-2.1.20_20170403.exe
- - 7z x -o%GPG_DIR% gnupg-w32-2.1.20_20170403.exe
- - copy "%GPG_DIR%\lib\libgpg-error.imp" "%GPG_DIR%\lib\libgpg-error.a"
- - copy "%GPG_DIR%\lib\libassuan.imp" "%GPG_DIR%\lib\libassuan.a"
- - copy "%GPG_DIR%\lib\libgpgme.imp" "%GPG_DIR%\lib\libgpgme.a"
- - set PATH=%GOPATH%\bin;%GOROOT%\bin;%GPG_DIR%\bin;C:\MinGW\bin;%PATH%
- - C:\cygwin\bin\sed -i 's/"GPG_AGENT_INFO"/"GPG_AGENT_INFO="/;s/C.unsetenv(v)/C.putenv(v)/' %APPVEYOR_BUILD_FOLDER%\gpgme.go
-
-test_script:
- - go test -v github.com/proglottis/gpgme
-
-
-build_script:
- - go build -o example_decrypt.exe -i %APPVEYOR_BUILD_FOLDER%\examples\decrypt.go
- - go build -o example_encrypt.exe -i %APPVEYOR_BUILD_FOLDER%\examples\encrypt.go
-
-artifacts:
- - path: example_decrypt.exe
- name: decrypt example binary
- - path: example_encrypt.exe
- name: encrypt example binary
\ No newline at end of file
diff --git a/vendor/github.com/proglottis/gpgme/.travis.yml b/vendor/github.com/proglottis/gpgme/.travis.yml
deleted file mode 100644
index 619e33721..000000000
--- a/vendor/github.com/proglottis/gpgme/.travis.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-language: go
-os:
- - linux
- - osx
- - windows
-dist: xenial
-sudo: false
-
-go:
- - 1.11
- - 1.12
- - 1.13
-
-addons:
- apt:
- packages:
- - libgpgme11-dev
- homebrew:
- packages:
- - gnupg
- - gnupg@1.4
- - gpgme
- update: true
-
-matrix:
- allow_failures:
- - os: windows
-
-before_install:
- - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install msys2; fi
- - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install gpg4win; fi
diff --git a/vendor/github.com/proglottis/gpgme/gpgme.go b/vendor/github.com/proglottis/gpgme/gpgme.go
index 82effbd9e..8f495ddf5 100644
--- a/vendor/github.com/proglottis/gpgme/gpgme.go
+++ b/vendor/github.com/proglottis/gpgme/gpgme.go
@@ -2,7 +2,6 @@
package gpgme
// #cgo pkg-config: gpgme
-// #cgo LDFLAGS: -lgpgme -lassuan -lgpg-error
// #cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
// #include <stdlib.h>
// #include <gpgme.h>
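
Dropping the explicit LDFLAGS line leaves pkg-config as the single source of compile and link flags, so the build adapts to wherever the system installed gpgme. A hedged cgo sketch of the resulting setup (assumes gpgme and its pkg-config file are installed; not part of the patch):

package main

// #cgo pkg-config: gpgme
// #include <gpgme.h>
import "C"

import "fmt"

func main() {
	// gpgme_check_version(NULL) initializes the library and returns its version string.
	fmt.Println(C.GoString(C.gpgme_check_version(nil)))
}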
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
index 44986bff0..c67ff1b7f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/README.md
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -1 +1 @@
-See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
+See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
new file mode 100644
index 000000000..450189f35
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "runtime/debug"
+
+// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewBuildInfoCollector instead.
+func NewBuildInfoCollector() Collector {
+ path, version, sum := "unknown", "unknown", "unknown"
+ if bi, ok := debug.ReadBuildInfo(); ok {
+ path = bi.Main.Path
+ version = bi.Main.Version
+ sum = bi.Main.Sum
+ }
+ c := &selfCollector{MustNewConstMetric(
+ NewDesc(
+ "go_build_info",
+ "Build information about the main Go module.",
+ nil, Labels{"path": path, "version": version, "checksum": sum},
+ ),
+ GaugeValue, 1)}
+ c.init(c.self)
+ return c
+}
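
As the deprecation notice says, new code should reach this collector through the collectors package rather than the compatibility shim above. A hedged usage sketch (registry setup invented for illustration):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewBuildInfoCollector())
	mfs, _ := reg.Gather()
	for _, mf := range mfs {
		fmt.Println(mf.GetName()) // go_build_info
	}
}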
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index 1e839650d..ac1ca3cf5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -118,3 +118,11 @@ func (c *selfCollector) Describe(ch chan<- *Desc) {
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
+
+// collectorMetric is a metric that is also a collector.
+// Because of selfCollector, most (if not all) Metrics in
+// this package are also collectors.
+type collectorMetric interface {
+ Metric
+ Collector
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index 3f8fd790d..00d70f09b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -133,10 +133,14 @@ func (c *counter) Inc() {
atomic.AddUint64(&c.valInt, 1)
}
-func (c *counter) Write(out *dto.Metric) error {
+func (c *counter) get() float64 {
fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
ival := atomic.LoadUint64(&c.valInt)
- val := fval + float64(ival)
+ return fval + float64(ival)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+ val := c.get()
var exemplar *dto.Exemplar
if e := c.exemplar.Load(); e != nil {
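
The extracted get() reads the counter's two-word storage: integer increments accumulate in one uint64, float additions live as a float64 bit pattern in another, and the observed value is their sum. A standalone sketch of that split-storage technique (type and method names invented here):

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

type splitCounter struct {
	valBits uint64 // float64 bit pattern, updated by CAS in the real Add(float64) path
	valInt  uint64 // plain integer increments
}

func (c *splitCounter) Inc() { atomic.AddUint64(&c.valInt, 1) }

// get combines both words, mirroring the extracted counter.get().
func (c *splitCounter) get() float64 {
	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
	ival := atomic.LoadUint64(&c.valInt)
	return fval + float64(ival)
}

func main() {
	var c splitCounter
	c.Inc()
	c.Inc()
	fmt.Println(c.get()) // 2
}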
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index a96ed1cee..08195b410 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -16,32 +16,209 @@ package prometheus
import (
"runtime"
"runtime/debug"
- "sync"
"time"
)
-type goCollector struct {
+func goRuntimeMemStats() memStatsMetrics {
+ return memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated, even if freed.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("lookups_total"),
+ "Total number of pointer lookups.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ "Total number of mallocs.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of frees.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of allocated objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes in use by the stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
+ },
+ }
+}
+
+type baseGoCollector struct {
goroutinesDesc *Desc
threadsDesc *Desc
gcDesc *Desc
+ gcLastTimeDesc *Desc
goInfoDesc *Desc
-
- // ms... are memstats related.
- msLast *runtime.MemStats // Previously collected memstats.
- msLastTimestamp time.Time
- msMtx sync.Mutex // Protects msLast and msLastTimestamp.
- msMetrics memStatsMetrics
- msRead func(*runtime.MemStats) // For mocking in tests.
- msMaxWait time.Duration // Wait time for fresh memstats.
- msMaxAge time.Duration // Maximum allowed age of old memstats.
}
-// NewGoCollector is the obsolete version of collectors.NewGoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector {
- return &goCollector{
+func newBaseGoCollector() baseGoCollector {
+ return baseGoCollector{
goroutinesDesc: NewDesc(
"go_goroutines",
"Number of goroutines that currently exist.",
@@ -54,243 +231,28 @@ func NewGoCollector() Collector {
"go_gc_duration_seconds",
"A summary of the pause duration of garbage collection cycles.",
nil, nil),
+ gcLastTimeDesc: NewDesc(
+ memstatNamespace("last_gc_time_seconds"),
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil),
goInfoDesc: NewDesc(
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
- msLast: &runtime.MemStats{},
- msRead: runtime.ReadMemStats,
- msMaxWait: time.Second,
- msMaxAge: 5 * time.Minute,
- msMetrics: memStatsMetrics{
- {
- desc: NewDesc(
- memstatNamespace("alloc_bytes"),
- "Number of bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("alloc_bytes_total"),
- "Total number of bytes allocated, even if freed.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("sys_bytes"),
- "Number of bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("lookups_total"),
- "Total number of pointer lookups.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mallocs_total"),
- "Total number of mallocs.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("frees_total"),
- "Total number of frees.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_alloc_bytes"),
- "Number of heap bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_sys_bytes"),
- "Number of heap bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_idle_bytes"),
- "Number of heap bytes waiting to be used.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_inuse_bytes"),
- "Number of heap bytes that are in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_released_bytes"),
- "Number of heap bytes released to OS.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_objects"),
- "Number of allocated objects.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_inuse_bytes"),
- "Number of bytes in use by the stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_sys_bytes"),
- "Number of bytes obtained from system for stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_inuse_bytes"),
- "Number of bytes in use by mspan structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_sys_bytes"),
- "Number of bytes used for mspan structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_inuse_bytes"),
- "Number of bytes in use by mcache structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_sys_bytes"),
- "Number of bytes used for mcache structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("buck_hash_sys_bytes"),
- "Number of bytes used by the profiling bucket hash table.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_sys_bytes"),
- "Number of bytes used for garbage collection system metadata.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("other_sys_bytes"),
- "Number of bytes used for other system allocations.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("next_gc_bytes"),
- "Number of heap bytes when next garbage collection will take place.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("last_gc_time_seconds"),
- "Number of seconds since 1970 of last garbage collection.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_cpu_fraction"),
- "The fraction of this program's available CPU time used by the GC since the program started.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
- valType: GaugeValue,
- },
- },
}
}
-func memstatNamespace(s string) string {
- return "go_memstats_" + s
-}
-
// Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
+func (c *baseGoCollector) Describe(ch chan<- *Desc) {
ch <- c.goroutinesDesc
ch <- c.threadsDesc
ch <- c.gcDesc
+ ch <- c.gcLastTimeDesc
ch <- c.goInfoDesc
- for _, i := range c.msMetrics {
- ch <- i.desc
- }
}
// Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
- var (
- ms = &runtime.MemStats{}
- done = make(chan struct{})
- )
- // Start reading memstats first as it might take a while.
- go func() {
- c.msRead(ms)
- c.msMtx.Lock()
- c.msLast = ms
- c.msLastTimestamp = time.Now()
- c.msMtx.Unlock()
- close(done)
- }()
-
+func (c *baseGoCollector) Collect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
n, _ := runtime.ThreadCreateProfile(nil)
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
@@ -305,63 +267,19 @@ func (c *goCollector) Collect(ch chan<- Metric) {
}
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+ ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
-
- timer := time.NewTimer(c.msMaxWait)
- select {
- case <-done: // Our own ReadMemStats succeeded in time. Use it.
- timer.Stop() // Important for high collection frequencies to not pile up timers.
- c.msCollect(ch, ms)
- return
- case <-timer.C: // Time out, use last memstats if possible. Continue below.
- }
- c.msMtx.Lock()
- if time.Since(c.msLastTimestamp) < c.msMaxAge {
- // Last memstats are recent enough. Collect from them under the lock.
- c.msCollect(ch, c.msLast)
- c.msMtx.Unlock()
- return
- }
- // If we are here, the last memstats are too old or don't exist. We have
- // to wait until our own ReadMemStats finally completes. For that to
- // happen, we have to release the lock.
- c.msMtx.Unlock()
- <-done
- c.msCollect(ch, ms)
}
-func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
- for _, i := range c.msMetrics {
- ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
- }
+func memstatNamespace(s string) string {
+ return "go_memstats_" + s
}
-// memStatsMetrics provide description, value, and value type for memstat metrics.
+// memStatsMetrics provide description, evaluator, and value type
+// for memstat metrics.
type memStatsMetrics []struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}
-
-// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewBuildInfoCollector instead.
-func NewBuildInfoCollector() Collector {
- path, version, sum := "unknown", "unknown", "unknown"
- if bi, ok := debug.ReadBuildInfo(); ok {
- path = bi.Main.Path
- version = bi.Main.Version
- sum = bi.Main.Sum
- }
- c := &selfCollector{MustNewConstMetric(
- NewDesc(
- "go_build_info",
- "Build information about the main Go module.",
- nil, Labels{"path": path, "version": version, "checksum": sum},
- ),
- GaugeValue, 1)}
- c.init(c.self)
- return c
-}
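
The memstats logic removed here (and reinstated below for pre-1.17 Go in go_collector_go116.go) follows one pattern: start the potentially slow read in a goroutine, wait a bounded time for it, and fall back to a cached value if the fresh read misses the deadline. A compressed, self-contained sketch of that pattern (names illustrative):

package main

import (
	"fmt"
	"time"
)

func readWithTimeout(read func() int, cached int, maxWait time.Duration) int {
	done := make(chan int, 1) // buffered so the reader never blocks on send
	go func() { done <- read() }()
	timer := time.NewTimer(maxWait)
	defer timer.Stop() // avoid piling up timers at high collection frequency
	select {
	case v := <-done:
		return v // fresh value arrived in time
	case <-timer.C:
		return cached // too slow: serve the last known value instead
	}
}

func main() {
	slow := func() int { time.Sleep(50 * time.Millisecond); return 42 }
	fmt.Println(readWithTimeout(slow, 7, 10*time.Millisecond)) // 7 (cached)
	fmt.Println(readWithTimeout(slow, 7, time.Second))         // 42 (fresh)
}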
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
new file mode 100644
index 000000000..24526131e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
@@ -0,0 +1,107 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.17
+// +build !go1.17
+
+package prometheus
+
+import (
+ "runtime"
+ "sync"
+ "time"
+)
+
+type goCollector struct {
+ base baseGoCollector
+
+ // ms... are memstats related.
+ msLast *runtime.MemStats // Previously collected memstats.
+ msLastTimestamp time.Time
+ msMtx sync.Mutex // Protects msLast and msLastTimestamp.
+ msMetrics memStatsMetrics
+ msRead func(*runtime.MemStats) // For mocking in tests.
+ msMaxWait time.Duration // Wait time for fresh memstats.
+ msMaxAge time.Duration // Maximum allowed age of old memstats.
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector() Collector {
+ return &goCollector{
+ base: newBaseGoCollector(),
+ msLast: &runtime.MemStats{},
+ msRead: runtime.ReadMemStats,
+ msMaxWait: time.Second,
+ msMaxAge: 5 * time.Minute,
+ msMetrics: goRuntimeMemStats(),
+ }
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ c.base.Describe(ch)
+ for _, i := range c.msMetrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ var (
+ ms = &runtime.MemStats{}
+ done = make(chan struct{})
+ )
+ // Start reading memstats first as it might take a while.
+ go func() {
+ c.msRead(ms)
+ c.msMtx.Lock()
+ c.msLast = ms
+ c.msLastTimestamp = time.Now()
+ c.msMtx.Unlock()
+ close(done)
+ }()
+
+ // Collect base non-memory metrics.
+ c.base.Collect(ch)
+
+ timer := time.NewTimer(c.msMaxWait)
+ select {
+ case <-done: // Our own ReadMemStats succeeded in time. Use it.
+ timer.Stop() // Important for high collection frequencies to not pile up timers.
+ c.msCollect(ch, ms)
+ return
+ case <-timer.C: // Time out, use last memstats if possible. Continue below.
+ }
+ c.msMtx.Lock()
+ if time.Since(c.msLastTimestamp) < c.msMaxAge {
+ // Last memstats are recent enough. Collect from them under the lock.
+ c.msCollect(ch, c.msLast)
+ c.msMtx.Unlock()
+ return
+ }
+ // If we are here, the last memstats are too old or don't exist. We have
+ // to wait until our own ReadMemStats finally completes. For that to
+ // happen, we have to release the lock.
+ c.msMtx.Unlock()
+ <-done
+ c.msCollect(ch, ms)
+}
+
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+ for _, i := range c.msMetrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
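
For context, a minimal sketch of how this pre-Go-1.17 collector is driven; the registry setup below is illustrative and not part of the diff:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	// Deprecated in favor of collectors.NewGoCollector, but still functional.
	reg.MustRegister(prometheus.NewGoCollector())

	// Gather triggers Collect above, including the bounded memstats read.
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}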
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go
new file mode 100644
index 000000000..d43bdcdda
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go
@@ -0,0 +1,408 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package prometheus
+
+import (
+ "math"
+ "runtime"
+ "runtime/metrics"
+ "strings"
+ "sync"
+
+ //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/client_golang/prometheus/internal"
+ dto "github.com/prometheus/client_model/go"
+)
+
+type goCollector struct {
+ base baseGoCollector
+
+ // mu protects updates to all fields ensuring a consistent
+ // snapshot is always produced by Collect.
+ mu sync.Mutex
+
+ // rm... fields all pertain to the runtime/metrics package.
+ rmSampleBuf []metrics.Sample
+ rmSampleMap map[string]*metrics.Sample
+ rmMetrics []collectorMetric
+
+ // With Go 1.17, the runtime/metrics package was introduced.
+ // From that point on, metric names produced by the runtime/metrics
+ // package could be generated from runtime/metrics names. However,
+ // these differ from the old names for the same values.
+ //
+	// This field exists to export the same values under the old names
+ // as well.
+ msMetrics memStatsMetrics
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector() Collector {
+ descriptions := metrics.All()
+
+ // Collect all histogram samples so that we can get their buckets.
+ // The API guarantees that the buckets are always fixed for the lifetime
+ // of the process.
+ var histograms []metrics.Sample
+ for _, d := range descriptions {
+ if d.Kind == metrics.KindFloat64Histogram {
+ histograms = append(histograms, metrics.Sample{Name: d.Name})
+ }
+ }
+ metrics.Read(histograms)
+ bucketsMap := make(map[string][]float64)
+ for i := range histograms {
+ bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
+ }
+
+ // Generate a Desc and ValueType for each runtime/metrics metric.
+ metricSet := make([]collectorMetric, 0, len(descriptions))
+ sampleBuf := make([]metrics.Sample, 0, len(descriptions))
+ sampleMap := make(map[string]*metrics.Sample, len(descriptions))
+ for i := range descriptions {
+ d := &descriptions[i]
+ namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d)
+ if !ok {
+ // Just ignore this metric; we can't do anything with it here.
+ // If a user decides to use the latest version of Go, we don't want
+ // to fail here. This condition is tested elsewhere.
+ continue
+ }
+
+ // Set up sample buffer for reading, and a map
+ // for quick lookup of sample values.
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
+ sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
+
+ var m collectorMetric
+ if d.Kind == metrics.KindFloat64Histogram {
+ _, hasSum := rmExactSumMap[d.Name]
+ unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
+ m = newBatchHistogram(
+ NewDesc(
+ BuildFQName(namespace, subsystem, name),
+ d.Description,
+ nil,
+ nil,
+ ),
+ internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
+ hasSum,
+ )
+ } else if d.Cumulative {
+ m = NewCounter(CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: d.Description,
+ })
+ } else {
+ m = NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: d.Description,
+ })
+ }
+ metricSet = append(metricSet, m)
+ }
+ return &goCollector{
+ base: newBaseGoCollector(),
+ rmSampleBuf: sampleBuf,
+ rmSampleMap: sampleMap,
+ rmMetrics: metricSet,
+ msMetrics: goRuntimeMemStats(),
+ }
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ c.base.Describe(ch)
+ for _, i := range c.msMetrics {
+ ch <- i.desc
+ }
+ for _, m := range c.rmMetrics {
+ ch <- m.Desc()
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ // Collect base non-memory metrics.
+ c.base.Collect(ch)
+
+ // Collect must be thread-safe, so prevent concurrent use of
+ // rmSampleBuf. Just read into rmSampleBuf but write all the data
+ // we get into our Metrics or MemStats.
+ //
+ // This lock also ensures that the Metrics we send out are all from
+ // the same updates, ensuring their mutual consistency insofar as
+ // is guaranteed by the runtime/metrics package.
+ //
+ // N.B. This locking is heavy-handed, but Collect is expected to be called
+ // relatively infrequently. Also the core operation here, metrics.Read,
+ // is fast (O(tens of microseconds)) so contention should certainly be
+ // low, though channel operations and any allocations may add to that.
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // Populate runtime/metrics sample buffer.
+ metrics.Read(c.rmSampleBuf)
+
+ // Update all our metrics from rmSampleBuf.
+ for i, sample := range c.rmSampleBuf {
+ // N.B. switch on concrete type because it's significantly more efficient
+ // than checking for the Counter and Gauge interface implementations. In
+ // this case, we control all the types here.
+ switch m := c.rmMetrics[i].(type) {
+ case *counter:
+ // Guard against decreases. This should never happen, but a failure
+ // to do so will result in a panic, which is a harsh consequence for
+ // a metrics collection bug.
+ v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
+ if v1 > v0 {
+ m.Add(unwrapScalarRMValue(sample.Value) - m.get())
+ }
+ m.Collect(ch)
+ case *gauge:
+ m.Set(unwrapScalarRMValue(sample.Value))
+ m.Collect(ch)
+ case *batchHistogram:
+ m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
+ m.Collect(ch)
+ default:
+ panic("unexpected metric type")
+ }
+ }
+ // ms is a dummy MemStats that we populate ourselves so that we can
+ // populate the old metrics from it.
+ var ms runtime.MemStats
+ memStatsFromRM(&ms, c.rmSampleMap)
+ for _, i := range c.msMetrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
+ }
+}
+
+// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
+// to be scalar and returns the equivalent float64 value. Panics if the
+// value is not scalar.
+func unwrapScalarRMValue(v metrics.Value) float64 {
+ switch v.Kind() {
+ case metrics.KindUint64:
+ return float64(v.Uint64())
+ case metrics.KindFloat64:
+ return v.Float64()
+ case metrics.KindBad:
+ // Unsupported metric.
+ //
+ // This should never happen because we always populate our metric
+ // set from the runtime/metrics package.
+ panic("unexpected unsupported metric")
+ default:
+ // Unsupported metric kind.
+ //
+ // This should never happen because we check for this during initialization
+ // and flag and filter metrics whose kinds we don't understand.
+ panic("unexpected unsupported metric kind")
+ }
+}
+
+var rmExactSumMap = map[string]string{
+ "/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes",
+ "/gc/heap/frees-by-size:bytes": "/gc/heap/frees:bytes",
+}
+
+// exactSumFor takes a runtime/metrics metric name (that is assumed to
+// be of kind KindFloat64Histogram) and returns its exact sum and whether
+// its exact sum exists.
+//
+// The runtime/metrics API for histograms doesn't currently expose exact
+// sums, but some of the other metrics are in fact exact sums of histograms.
+func (c *goCollector) exactSumFor(rmName string) float64 {
+ sumName, ok := rmExactSumMap[rmName]
+ if !ok {
+ return 0
+ }
+ s, ok := c.rmSampleMap[sumName]
+ if !ok {
+ return 0
+ }
+ return unwrapScalarRMValue(s.Value)
+}
+
+func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
+ lookupOrZero := func(name string) uint64 {
+ if s, ok := rm[name]; ok {
+ return s.Value.Uint64()
+ }
+ return 0
+ }
+
+ // Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
+	// This is because MemStats couldn't be extended at the time,
+ // but there was a desire to have Mallocs at least be a little more representative,
+ // while having Mallocs - Frees still represent a live object count.
+ // Unfortunately, MemStats doesn't actually export a large allocation count,
+ // so it's impossible to pull this number out directly.
+ tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects")
+ ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs
+ ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs
+
+ ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes")
+ ms.Sys = lookupOrZero("/memory/classes/total:bytes")
+ ms.Lookups = 0 // Already always zero.
+ ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes")
+ ms.Alloc = ms.HeapAlloc
+ ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes")
+ ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes")
+ ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes")
+ ms.HeapSys = ms.HeapInuse + ms.HeapIdle
+ ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects")
+ ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes")
+ ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes")
+ ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes")
+ ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes")
+ ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes")
+ ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes")
+ ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes")
+ ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes")
+ ms.OtherSys = lookupOrZero("/memory/classes/other:bytes")
+ ms.NextGC = lookupOrZero("/gc/heap/goal:bytes")
+
+ // N.B. LastGC is omitted because runtime.GCStats already has this.
+ // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+ // for more details.
+ ms.LastGC = 0
+
+ // N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
+ // and often misleading due to the fact that it's an average over the lifetime
+ // of the process.
+ // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+ // for more details.
+ ms.GCCPUFraction = 0
+}
+
+// batchHistogram is a mutable histogram that is updated
+// in batches.
+type batchHistogram struct {
+ selfCollector
+
+ // Static fields updated only once.
+ desc *Desc
+ hasSum bool
+
+ // Because this histogram operates in batches, it just uses a
+ // single mutex for everything. updates are always serialized
+ // but Write calls may operate concurrently with updates.
+ // Contention between these two sources should be rare.
+ mu sync.Mutex
+ buckets []float64 // Inclusive lower bounds, like runtime/metrics.
+ counts []uint64
+ sum float64 // Used if hasSum is true.
+}
+
+// newBatchHistogram creates a new batch histogram value with the given
+// Desc, buckets, and whether or not it has an exact sum available.
+//
+// buckets must always be from the runtime/metrics package, following
+// the same conventions.
+func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
+ h := &batchHistogram{
+ desc: desc,
+ buckets: buckets,
+ // Because buckets follows runtime/metrics conventions, there's
+ // 1 more value in the buckets list than there are buckets represented,
+ // because in runtime/metrics, the bucket values represent *boundaries*,
+ // and non-Inf boundaries are inclusive lower bounds for that bucket.
+ counts: make([]uint64, len(buckets)-1),
+ hasSum: hasSum,
+ }
+ h.init(h)
+ return h
+}
+
+// update updates the batchHistogram from a runtime/metrics histogram.
+//
+// sum must be provided if the batchHistogram was created to have an exact sum.
+// h.buckets must be a strict subset of his.Buckets.
+func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
+ counts, buckets := his.Counts, his.Buckets
+
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ // Clear buckets.
+ for i := range h.counts {
+ h.counts[i] = 0
+ }
+ // Copy and reduce buckets.
+ var j int
+ for i, count := range counts {
+ h.counts[j] += count
+ if buckets[i+1] == h.buckets[j+1] {
+ j++
+ }
+ }
+ if h.hasSum {
+ h.sum = sum
+ }
+}
+
+func (h *batchHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *batchHistogram) Write(out *dto.Metric) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ sum := float64(0)
+ if h.hasSum {
+ sum = h.sum
+ }
+ dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
+ totalCount := uint64(0)
+ for i, count := range h.counts {
+ totalCount += count
+ if !h.hasSum {
+ // N.B. This computed sum is an underestimate.
+ sum += h.buckets[i] * float64(count)
+ }
+
+ // Skip the +Inf bucket, but only for the bucket list.
+ // It must still count for sum and totalCount.
+ if math.IsInf(h.buckets[i+1], 1) {
+ break
+ }
+ // Float64Histogram's upper bound is exclusive, so make it inclusive
+ // by obtaining the next float64 value down, in order.
+ upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
+ dtoBuckets = append(dtoBuckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(totalCount),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+ out.Histogram = &dto.Histogram{
+ Bucket: dtoBuckets,
+ SampleCount: proto.Uint64(totalCount),
+ SampleSum: proto.Float64(sum),
+ }
+ return nil
+}
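
The Go 1.17 collector above is essentially a bridge over the standard runtime/metrics package; a standalone sketch of the underlying read loop (standard library only, names taken from that package):

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	descs := metrics.All()
	samples := make([]metrics.Sample, len(descs))
	for i, d := range descs {
		samples[i].Name = d.Name
	}
	// One cheap batched read, as in Collect above.
	metrics.Read(samples)

	for _, s := range samples {
		switch s.Value.Kind() {
		case metrics.KindUint64:
			fmt.Printf("%s = %d\n", s.Name, s.Value.Uint64())
		case metrics.KindFloat64:
			fmt.Printf("%s = %g\n", s.Name, s.Value.Float64())
		case metrics.KindFloat64Histogram:
			h := s.Value.Float64Histogram()
			fmt.Printf("%s = histogram, %d boundaries\n", s.Name, len(h.Buckets))
		}
	}
}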
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 8425640b3..893802fd6 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -116,6 +116,34 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
return buckets
}
+// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
+// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
+func ExponentialBucketsRange(min, max float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBucketsRange count needs a positive count")
+ }
+ if min <= 0 {
+ panic("ExponentialBucketsRange min needs to be greater than 0")
+ }
+
+ // Formula for exponential buckets.
+ // max = min*growthFactor^(bucketCount-1)
+
+ // We know max/min and highest bucket. Solve for growthFactor.
+ growthFactor := math.Pow(max/min, 1.0/float64(count-1))
+
+ // Now that we know growthFactor, solve for each bucket.
+ buckets := make([]float64, count)
+ for i := 1; i <= count; i++ {
+ buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
+ }
+ return buckets
+}
+
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly
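
A quick worked example of the new ExponentialBucketsRange helper; the metric name below is illustrative:

// ExponentialBucketsRange(1, 100, 5):
//   growthFactor = (100/1)^(1/(5-1)) ≈ 3.1623
//   buckets      ≈ [1, 3.16, 10, 31.62, 100]
hist := prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "request_duration_seconds",
	Help:    "Request latency.",
	Buckets: prometheus.ExponentialBucketsRange(0.001, 10, 5),
})
_ = hist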
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
new file mode 100644
index 000000000..fe0a52180
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -0,0 +1,142 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package internal
+
+import (
+ "math"
+ "path"
+ "runtime/metrics"
+ "strings"
+
+ "github.com/prometheus/common/model"
+)
+
+// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
+// metric description and validates whether the metric is suitable for integration
+// with Prometheus.
+//
+// Returns false if a name could not be produced, or if Prometheus does not understand
+// the runtime/metrics Kind.
+//
+// Note that the main reason a name couldn't be produced is if the runtime/metrics
+// package exports a name with characters outside the valid Prometheus metric name
+// character set. This is theoretically possible, but should never happen in practice.
+// Still, don't rely on it.
+func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
+ namespace := "go"
+
+ comp := strings.SplitN(d.Name, ":", 2)
+ key := comp[0]
+ unit := comp[1]
+
+ // The last path element in the key is the name,
+ // the rest is the subsystem.
+ subsystem := path.Dir(key[1:] /* remove leading / */)
+ name := path.Base(key)
+
+ // subsystem is translated by replacing all / and - with _.
+ subsystem = strings.ReplaceAll(subsystem, "/", "_")
+ subsystem = strings.ReplaceAll(subsystem, "-", "_")
+
+ // unit is translated assuming that the unit contains no
+ // non-ASCII characters.
+ unit = strings.ReplaceAll(unit, "-", "_")
+ unit = strings.ReplaceAll(unit, "*", "_")
+ unit = strings.ReplaceAll(unit, "/", "_per_")
+
+ // name has - replaced with _ and is concatenated with the unit and
+ // other data.
+ name = strings.ReplaceAll(name, "-", "_")
+ name = name + "_" + unit
+ if d.Cumulative {
+ name = name + "_total"
+ }
+
+ valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
+ switch d.Kind {
+ case metrics.KindUint64:
+ case metrics.KindFloat64:
+ case metrics.KindFloat64Histogram:
+ default:
+ valid = false
+ }
+ return namespace, subsystem, name, valid
+}
+
+// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
+// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
+// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
+// as the bottom-most upper-bound inclusive bucket in Prometheus.
+func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
+ switch unit {
+ case "bytes":
+ // Rebucket as powers of 2.
+ return rebucketExp(buckets, 2)
+ case "seconds":
+ // Rebucket as powers of 10 and then merge all buckets greater
+ // than 1 second into the +Inf bucket.
+ b := rebucketExp(buckets, 10)
+ for i := range b {
+ if b[i] <= 1 {
+ continue
+ }
+ b[i] = math.Inf(1)
+ b = b[:i+1]
+ break
+ }
+ return b
+ }
+ return buckets
+}
+
+// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
+// downsamples the buckets to those a multiple of base apart. The end result
+// is a roughly exponential (in many cases, perfectly exponential) bucketing
+// scheme.
+func rebucketExp(buckets []float64, base float64) []float64 {
+ bucket := buckets[0]
+ var newBuckets []float64
+ // We may see a -Inf here, in which case, add it and skip it
+ // since we risk producing NaNs otherwise.
+ //
+ // We need to preserve -Inf values to maintain runtime/metrics
+ // conventions. We'll strip it out later.
+ if bucket == math.Inf(-1) {
+ newBuckets = append(newBuckets, bucket)
+ buckets = buckets[1:]
+ bucket = buckets[0]
+ }
+ // From now on, bucket should always have a non-Inf value because
+ // Infs are only ever at the ends of the bucket lists, so
+ // arithmetic operations on it are non-NaN.
+ for i := 1; i < len(buckets); i++ {
+ if bucket >= 0 && buckets[i] < bucket*base {
+ // The next bucket we want to include is at least bucket*base.
+ continue
+ } else if bucket < 0 && buckets[i] < bucket/base {
+ // In this case the bucket we're targeting is negative, and since
+ // we're ascending through buckets here, we need to divide to get
+ // closer to zero exponentially.
+ continue
+ }
+		// The +Inf bucket will always be the last one, and we'll always
+		// end up including it here, because +Inf never satisfies the merge
+		// conditions above.
+ newBuckets = append(newBuckets, bucket)
+ bucket = buckets[i]
+ }
+ return append(newBuckets, bucket)
+}
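
A worked trace of RuntimeMetricsToProm for one real Go 1.17 metric name (the package is internal, so this is a trace rather than a runnable call):

// Input: d.Name = "/gc/heap/allocs:bytes", d.Cumulative = true
//   key       = "/gc/heap/allocs"            unit = "bytes"
//   subsystem = path.Dir("gc/heap/allocs")   = "gc/heap" -> "gc_heap"
//   name      = "allocs" -> "allocs_bytes" -> "allocs_bytes_total" (cumulative)
// Output: namespace "go", subsystem "gc_heap", name "allocs_bytes_total",
// i.e. the exported metric go_gc_heap_allocs_bytes_total.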
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
index 3117461cd..2dc3660da 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !windows
// +build !windows
package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index c778711b8..b4e0ae11c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -21,7 +21,7 @@ import (
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
+ "google.golang.org/protobuf/types/known/timestamppb"
dto "github.com/prometheus/client_model/go"
)
@@ -183,8 +183,8 @@ const ExemplarMaxRunes = 64
func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
e := &dto.Exemplar{}
e.Value = proto.Float64(value)
- tsProto, err := ptypes.TimestampProto(ts)
- if err != nil {
+ tsProto := timestamppb.New(ts)
+ if err := tsProto.CheckValid(); err != nil {
return nil, err
}
e.Timestamp = tsProto
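
The ptypes-to-timestamppb change above follows the standard protobuf-go migration pattern; a minimal sketch:

import (
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func toProtoTimestamp(t time.Time) (*timestamppb.Timestamp, error) {
	ts := timestamppb.New(t) // construction itself cannot fail
	if err := ts.CheckValid(); err != nil {
		return nil, err // validity is now checked explicitly
	}
	return ts, nil
}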
diff --git a/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt b/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt
new file mode 100644
index 000000000..7a01c8498
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt
@@ -0,0 +1,14 @@
+
+Copyright 2021 The Sigstore Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/sigstore/sigstore/LICENSE b/vendor/github.com/sigstore/sigstore/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go
new file mode 100644
index 000000000..21c268550
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go
@@ -0,0 +1,170 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cryptoutils
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "time"
+)
+
+const (
+ // CertificatePEMType is the string "CERTIFICATE" to be used during PEM encoding and decoding
+ CertificatePEMType PEMType = "CERTIFICATE"
+)
+
+// MarshalCertificateToPEM converts the provided X509 certificate into PEM format
+func MarshalCertificateToPEM(cert *x509.Certificate) ([]byte, error) {
+ if cert == nil {
+ return nil, errors.New("nil certificate provided")
+ }
+ return PEMEncode(CertificatePEMType, cert.Raw), nil
+}
+
+// MarshalCertificatesToPEM converts the provided X509 certificates into PEM format
+func MarshalCertificatesToPEM(certs []*x509.Certificate) ([]byte, error) {
+ buf := bytes.Buffer{}
+ for _, cert := range certs {
+ pemBytes, err := MarshalCertificateToPEM(cert)
+ if err != nil {
+ return nil, err
+ }
+ _, _ = buf.Write(pemBytes)
+ }
+ return buf.Bytes(), nil
+}
+
+// UnmarshalCertificatesFromPEM extracts one or more X509 certificates from the provided
+// byte slice, which is assumed to be in PEM-encoded format.
+func UnmarshalCertificatesFromPEM(pemBytes []byte) ([]*x509.Certificate, error) {
+ result := []*x509.Certificate{}
+ remaining := pemBytes
+
+ for len(remaining) > 0 {
+ var certDer *pem.Block
+ certDer, remaining = pem.Decode(remaining)
+
+ if certDer == nil {
+ return nil, errors.New("error during PEM decoding")
+ }
+
+ cert, err := x509.ParseCertificate(certDer.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, cert)
+ }
+ return result, nil
+}
+
+// UnmarshalCertificatesFromPEMLimited extracts one or more X509 certificates from the provided
+// byte slice, which is assumed to be in PEM-encoded format. Fails after a specified
+// number of iterations. A reasonable limit is 10 iterations.
+func UnmarshalCertificatesFromPEMLimited(pemBytes []byte, iterations int) ([]*x509.Certificate, error) {
+ result := []*x509.Certificate{}
+ remaining := pemBytes
+
+ count := 0
+ for len(remaining) > 0 {
+ if count == iterations {
+ return nil, errors.New("too many certificates specified in PEM block")
+ }
+ var certDer *pem.Block
+ certDer, remaining = pem.Decode(remaining)
+
+ if certDer == nil {
+ return nil, errors.New("error during PEM decoding")
+ }
+
+ cert, err := x509.ParseCertificate(certDer.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, cert)
+ count++
+ }
+ return result, nil
+}
+
+// LoadCertificatesFromPEM extracts one or more X509 certificates from the provided
+// io.Reader.
+func LoadCertificatesFromPEM(pem io.Reader) ([]*x509.Certificate, error) {
+ fileBytes, err := io.ReadAll(pem)
+ if err != nil {
+ return nil, err
+ }
+ return UnmarshalCertificatesFromPEM(fileBytes)
+}
+
+func formatTime(t time.Time) string {
+ return t.UTC().Format(time.RFC3339)
+}
+
+// CheckExpiration verifies that epoch is during the validity period of
+// the certificate provided.
+//
+// It returns nil if issueTime < epoch < expirationTime, and an error otherwise.
+func CheckExpiration(cert *x509.Certificate, epoch time.Time) error {
+ if cert == nil {
+ return errors.New("certificate is nil")
+ }
+ if cert.NotAfter.Before(epoch) {
+ return fmt.Errorf("certificate expiration time %s is before %s", formatTime(cert.NotAfter), formatTime(epoch))
+ }
+ if cert.NotBefore.After(epoch) {
+ return fmt.Errorf("certificate issued time %s is before %s", formatTime(cert.NotBefore), formatTime(epoch))
+ }
+ return nil
+}
+
+// ParseCSR parses a PKCS#10 PEM-encoded CSR.
+func ParseCSR(csr []byte) (*x509.CertificateRequest, error) {
+ derBlock, _ := pem.Decode(csr)
+ if derBlock == nil || derBlock.Bytes == nil {
+ return nil, errors.New("no CSR found while decoding")
+ }
+ correctType := false
+ acceptedHeaders := []string{"CERTIFICATE REQUEST", "NEW CERTIFICATE REQUEST"}
+ for _, v := range acceptedHeaders {
+ if derBlock.Type == v {
+ correctType = true
+ }
+ }
+ if !correctType {
+ return nil, fmt.Errorf("DER type %v is not of any type %v for CSR", derBlock.Type, acceptedHeaders)
+ }
+
+ return x509.ParseCertificateRequest(derBlock.Bytes)
+}
+
+// GenerateSerialNumber creates a compliant serial number as per RFC 5280 4.1.2.2.
+// Serial numbers must be positive, and can be no longer than 20 bytes.
+// The serial number is generated with 159 bits, so that the first bit will always
+// be 0, resulting in a positive serial number.
+func GenerateSerialNumber() (*big.Int, error) {
+ // Pick a random number from 0 to 2^159.
+ serial, err := rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil))
+ if err != nil {
+ return nil, errors.New("error generating serial number")
+ }
+ return serial, nil
+}
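
A minimal sketch of how these certificate helpers compose; pemBytes and the bound of 10 are assumptions:

certs, err := cryptoutils.UnmarshalCertificatesFromPEMLimited(pemBytes, 10)
if err != nil {
	return err
}
// Verify the leaf certificate is currently within its validity window.
if err := cryptoutils.CheckExpiration(certs[0], time.Now()); err != nil {
	return err
}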
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go
new file mode 100644
index 000000000..3fa6e7ba5
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go
@@ -0,0 +1,31 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cryptoutils
+
+import (
+ "encoding/pem"
+)
+
+// PEMType is a specific type for string constants used during PEM encoding and decoding
+type PEMType string
+
+// PEMEncode encodes the specified byte slice in PEM format using the provided type string
+func PEMEncode(typeStr PEMType, bytes []byte) []byte {
+ return pem.EncodeToMemory(&pem.Block{
+ Type: string(typeStr),
+ Bytes: bytes,
+ })
+}
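
PEMEncode is the building block the rest of the package uses; MarshalCertificateToPEM above, for example, reduces to:

pemBytes := cryptoutils.PEMEncode(cryptoutils.CertificatePEMType, cert.Raw)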
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go
new file mode 100644
index 000000000..72fe1aa3a
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go
@@ -0,0 +1,96 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cryptoutils
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "golang.org/x/term"
+)
+
+// PassFunc is a type of function that takes a boolean (representing whether confirmation is desired) and returns the password as read, along with an error if one occurred
+type PassFunc func(bool) ([]byte, error)
+
+var (
+ // Read is for fuzzing
+ Read = readPasswordFn
+)
+
+// readPasswordFn reads the password from the following sources, in order of preference:
+//
+// - COSIGN_PASSWORD environment variable
+//
+// - user input from the terminal (if present)
+//
+// - password piped to stdin
+func readPasswordFn() func() ([]byte, error) {
+ if pw, ok := os.LookupEnv("COSIGN_PASSWORD"); ok {
+ return func() ([]byte, error) {
+ return []byte(pw), nil
+ }
+ }
+ if term.IsTerminal(0) {
+ return func() ([]byte, error) {
+ return term.ReadPassword(0)
+ }
+ }
+ // Handle piped in passwords.
+ return func() ([]byte, error) {
+ return ioutil.ReadAll(os.Stdin)
+ }
+}
+
+// StaticPasswordFunc returns a PassFunc which returns the provided password.
+func StaticPasswordFunc(pw []byte) PassFunc {
+ return func(bool) ([]byte, error) {
+ return pw, nil
+ }
+}
+
+// SkipPassword is a PassFunc that does not interact with a user, but
+// simply returns nil for both the password and the error.
+func SkipPassword(_ bool) ([]byte, error) {
+ return nil, nil
+}
+
+// GetPasswordFromStdIn gathers the password from stdin with an
+// optional confirmation step.
+func GetPasswordFromStdIn(confirm bool) ([]byte, error) {
+ read := Read()
+ fmt.Fprint(os.Stderr, "Enter password for private key: ")
+ pw1, err := read()
+ fmt.Fprintln(os.Stderr)
+ if err != nil {
+ return nil, err
+ }
+ if !confirm {
+ return pw1, nil
+ }
+ fmt.Fprint(os.Stderr, "Enter again: ")
+ pw2, err := read()
+ fmt.Fprintln(os.Stderr)
+ if err != nil {
+ return nil, err
+ }
+
+ if string(pw1) != string(pw2) {
+ return nil, errors.New("passwords do not match")
+ }
+ return pw1, nil
+}
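
A sketch of the PassFunc plumbing; StaticPasswordFunc suits tests, GetPasswordFromStdIn interactive use, and the password literal is an assumption:

// Non-interactive (tests, CI):
pf := cryptoutils.StaticPasswordFunc([]byte("hunter2"))
pw, _ := pf(true) // the confirmation flag is ignored by static functions

// Interactive, with a confirmation prompt on stderr:
pw2, err := cryptoutils.GetPasswordFromStdIn(true)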
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go
new file mode 100644
index 000000000..d97bf36bf
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go
@@ -0,0 +1,144 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cryptoutils
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+
+ "github.com/theupdateframework/go-tuf/encrypted"
+)
+
+const (
+ // PrivateKeyPEMType is the string "PRIVATE KEY" to be used during PEM encoding and decoding
+ PrivateKeyPEMType PEMType = "PRIVATE KEY"
+ encryptedCosignPrivateKeyPEMType PEMType = "ENCRYPTED COSIGN PRIVATE KEY"
+ // EncryptedSigstorePrivateKeyPEMType is the string "ENCRYPTED SIGSTORE PRIVATE KEY" to be used during PEM encoding and decoding
+ EncryptedSigstorePrivateKeyPEMType PEMType = "ENCRYPTED SIGSTORE PRIVATE KEY"
+)
+
+func pemEncodeKeyPair(priv crypto.PrivateKey, pub crypto.PublicKey, pf PassFunc) (privPEM, pubPEM []byte, err error) {
+ pubPEM, err = MarshalPublicKeyToPEM(pub)
+ if err != nil {
+ return nil, nil, err
+ }
+ derBytes, err := MarshalPrivateKeyToDER(priv)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if pf == nil {
+ return PEMEncode(PrivateKeyPEMType, derBytes), pubPEM, nil
+ }
+ password, err := pf(true)
+ if err != nil {
+ return nil, nil, err
+ }
+ if password == nil {
+ return PEMEncode(PrivateKeyPEMType, derBytes), pubPEM, nil
+ }
+ if derBytes, err = encrypted.Encrypt(derBytes, password); err != nil {
+ return nil, nil, err
+ }
+ return PEMEncode(EncryptedSigstorePrivateKeyPEMType, derBytes), pubPEM, nil
+}
+
+// GeneratePEMEncodedECDSAKeyPair generates an ECDSA keypair, optionally password encrypted using a provided PassFunc, and PEM encoded.
+func GeneratePEMEncodedECDSAKeyPair(curve elliptic.Curve, pf PassFunc) (privPEM, pubPEM []byte, err error) {
+ priv, err := ecdsa.GenerateKey(curve, rand.Reader)
+ if err != nil {
+ return nil, nil, err
+ }
+ return pemEncodeKeyPair(priv, priv.Public(), pf)
+}
+
+// GeneratePEMEncodedRSAKeyPair generates an RSA keypair, optionally password encrypted using a provided PassFunc, and PEM encoded.
+func GeneratePEMEncodedRSAKeyPair(keyLengthBits int, pf PassFunc) (privPEM, pubPEM []byte, err error) {
+ priv, err := rsa.GenerateKey(rand.Reader, keyLengthBits)
+ if err != nil {
+ return nil, nil, err
+ }
+ return pemEncodeKeyPair(priv, priv.Public(), pf)
+}
+
+// MarshalPrivateKeyToEncryptedDER marshals the private key and encrypts the DER-encoded value using the specified password function
+func MarshalPrivateKeyToEncryptedDER(priv crypto.PrivateKey, pf PassFunc) ([]byte, error) {
+ derKey, err := MarshalPrivateKeyToDER(priv)
+ if err != nil {
+ return nil, err
+ }
+ password, err := pf(true)
+ if err != nil {
+ return nil, err
+ }
+ if password == nil {
+ return nil, errors.New("password was nil")
+ }
+ return encrypted.Encrypt(derKey, password)
+}
+
+// UnmarshalPEMToPrivateKey converts a PEM-encoded byte slice into a crypto.PrivateKey
+func UnmarshalPEMToPrivateKey(pemBytes []byte, pf PassFunc) (crypto.PrivateKey, error) {
+ derBlock, _ := pem.Decode(pemBytes)
+ if derBlock == nil {
+ return nil, errors.New("PEM decoding failed")
+ }
+ switch derBlock.Type {
+ case string(PrivateKeyPEMType):
+ return x509.ParsePKCS8PrivateKey(derBlock.Bytes)
+ case string(EncryptedSigstorePrivateKeyPEMType), string(encryptedCosignPrivateKeyPEMType):
+ derBytes := derBlock.Bytes
+ if pf != nil {
+ password, err := pf(false)
+ if err != nil {
+ return nil, err
+ }
+ if password != nil {
+ derBytes, err = encrypted.Decrypt(derBytes, password)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return x509.ParsePKCS8PrivateKey(derBytes)
+ }
+ return nil, fmt.Errorf("unknown PEM file type: %v", derBlock.Type)
+}
+
+// MarshalPrivateKeyToDER converts a crypto.PrivateKey into a PKCS8 ASN.1 DER byte slice
+func MarshalPrivateKeyToDER(priv crypto.PrivateKey) ([]byte, error) {
+ if priv == nil {
+ return nil, errors.New("empty key")
+ }
+ return x509.MarshalPKCS8PrivateKey(priv)
+}
+
+// MarshalPrivateKeyToPEM converts a crypto.PrivateKey into a PEM-encoded byte slice
+func MarshalPrivateKeyToPEM(priv crypto.PrivateKey) ([]byte, error) {
+ derBytes, err := MarshalPrivateKeyToDER(priv)
+ if err != nil {
+ return nil, err
+ }
+ return PEMEncode(PrivateKeyPEMType, derBytes), nil
+}
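
A round-trip sketch of the private-key helpers; the curve and password are assumptions:

pf := cryptoutils.StaticPasswordFunc([]byte("hunter2"))

privPEM, pubPEM, err := cryptoutils.GeneratePEMEncodedECDSAKeyPair(elliptic.P256(), pf)
if err != nil {
	return err
}
_ = pubPEM

// Decrypts the "ENCRYPTED SIGSTORE PRIVATE KEY" block; pf is called with confirm=false.
key, err := cryptoutils.UnmarshalPEMToPrivateKey(privPEM, pf)
if err != nil {
	return err
}
_ = key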
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
new file mode 100644
index 000000000..fd0a63432
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
@@ -0,0 +1,174 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cryptoutils
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/sha1" // nolint:gosec
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "fmt"
+
+ "github.com/letsencrypt/boulder/goodkey"
+)
+
+const (
+ // PublicKeyPEMType is the string "PUBLIC KEY" to be used during PEM encoding and decoding
+ PublicKeyPEMType PEMType = "PUBLIC KEY"
+)
+
+// subjectPublicKeyInfo is used to construct a subject key ID.
+// https://tools.ietf.org/html/rfc5280#section-4.1.2.7
+type subjectPublicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ SubjectPublicKey asn1.BitString
+}
+
+// UnmarshalPEMToPublicKey converts a PEM-encoded byte slice into a crypto.PublicKey
+func UnmarshalPEMToPublicKey(pemBytes []byte) (crypto.PublicKey, error) {
+ derBytes, _ := pem.Decode(pemBytes)
+ if derBytes == nil {
+ return nil, errors.New("PEM decoding failed")
+ }
+ return x509.ParsePKIXPublicKey(derBytes.Bytes)
+}
+
+// MarshalPublicKeyToDER converts a crypto.PublicKey into a PKIX, ASN.1 DER byte slice
+func MarshalPublicKeyToDER(pub crypto.PublicKey) ([]byte, error) {
+ if pub == nil {
+ return nil, errors.New("empty key")
+ }
+ return x509.MarshalPKIXPublicKey(pub)
+}
+
+// MarshalPublicKeyToPEM converts a crypto.PublicKey into a PEM-encoded byte slice
+func MarshalPublicKeyToPEM(pub crypto.PublicKey) ([]byte, error) {
+ derBytes, err := MarshalPublicKeyToDER(pub)
+ if err != nil {
+ return nil, err
+ }
+ return PEMEncode(PublicKeyPEMType, derBytes), nil
+}
+
+// SKID generates a 160-bit SHA-1 hash of the value of the BIT STRING
+// subjectPublicKey (excluding the tag, length, and number of unused bits).
+// https://tools.ietf.org/html/rfc5280#section-4.2.1.2
+func SKID(pub crypto.PublicKey) ([]byte, error) {
+ derPubBytes, err := x509.MarshalPKIXPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+ var spki subjectPublicKeyInfo
+ if _, err := asn1.Unmarshal(derPubBytes, &spki); err != nil {
+ return nil, err
+ }
+ skid := sha1.Sum(spki.SubjectPublicKey.Bytes) // nolint:gosec
+ return skid[:], nil
+}
+
+// EqualKeys compares two public keys. Supports RSA, ECDSA and ED25519.
+// If not equal, the error message contains the hex-encoded SKIDs (SHA-1 hashes) of both keys
+func EqualKeys(first, second crypto.PublicKey) error {
+ switch pub := first.(type) {
+ case *rsa.PublicKey:
+ if !pub.Equal(second) {
+ return fmt.Errorf(genErrMsg(first, second, "rsa"))
+ }
+ case *ecdsa.PublicKey:
+ if !pub.Equal(second) {
+ return fmt.Errorf(genErrMsg(first, second, "ecdsa"))
+ }
+ case ed25519.PublicKey:
+ if !pub.Equal(second) {
+ return fmt.Errorf(genErrMsg(first, second, "ed25519"))
+ }
+ default:
+ return errors.New("unsupported key type")
+ }
+ return nil
+}
+
+// genErrMsg generates an error message for EqualKeys
+func genErrMsg(first, second crypto.PublicKey, keyType string) string {
+ msg := fmt.Sprintf("%s public keys are not equal", keyType)
+ // Calculate SKID to include in error message
+ firstSKID, err := SKID(first)
+ if err != nil {
+ return msg
+ }
+ secondSKID, err := SKID(second)
+ if err != nil {
+ return msg
+ }
+ return fmt.Sprintf("%s (%s, %s)", msg, hex.EncodeToString(firstSKID), hex.EncodeToString(secondSKID))
+}
+
+// ValidatePubKey validates the parameters of an RSA, ECDSA, or ED25519 public key.
+func ValidatePubKey(pub crypto.PublicKey) error {
+ switch pk := pub.(type) {
+ case *rsa.PublicKey:
+ // goodkey policy enforces:
+ // * Size of key: 2048 <= size <= 4096, size % 8 = 0
+ // * Exponent E = 65537 (Default exponent for OpenSSL and Golang)
+ // * Small primes check for modulus
+ // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17)
+ // * Key is easily factored with Fermat's factorization method
+ p, err := goodkey.NewKeyPolicy(&goodkey.Config{FermatRounds: 100}, nil)
+ if err != nil {
+ // Should not occur, only chances to return errors are if fermat rounds
+ // are <0 or when loading blocked/weak keys from disk (not used here)
+ return errors.New("unable to initialize key policy")
+ }
+ // ctx is unused
+ return p.GoodKey(context.Background(), pub)
+ case *ecdsa.PublicKey:
+ // Unable to use goodkey policy because P-521 curve is not supported
+ return validateEcdsaKey(pk)
+ case ed25519.PublicKey:
+ return validateEd25519Key(pk)
+ }
+ return errors.New("unsupported public key type")
+}
+
+// Enforce that the ECDSA key curve is one of:
+// * NIST P-256 (secp256r1, prime256v1)
+// * NIST P-384
+// * NIST P-521.
+// Other EC curves, like secp256k1, are not supported by Go.
+func validateEcdsaKey(pub *ecdsa.PublicKey) error {
+ switch pub.Curve {
+ case elliptic.P224():
+ return fmt.Errorf("unsupported ec curve, expected NIST P-256, P-384, or P-521")
+ case elliptic.P256(), elliptic.P384(), elliptic.P521():
+ return nil
+ default:
+ return fmt.Errorf("unexpected ec curve")
+ }
+}
+
+// No validations currently, ED25519 supports only one key size.
+func validateEd25519Key(pub ed25519.PublicKey) error {
+ return nil
+}
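
And the corresponding public-key checks, sketched with assumed pub/otherPub values:

if err := cryptoutils.ValidatePubKey(pub); err != nil {
	return fmt.Errorf("weak or unsupported key: %w", err)
}
// nil means equal; otherwise the error embeds both keys' SKIDs.
if err := cryptoutils.EqualKeys(pub, otherPub); err != nil {
	return err
}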
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/doc.go
new file mode 100644
index 000000000..dbd3314f6
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/doc.go
@@ -0,0 +1,17 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package signature contains types and utilities related to Sigstore signatures.
+package signature
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go
new file mode 100644
index 000000000..dfbd5793d
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go
@@ -0,0 +1,244 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+// checked on LoadSigner, LoadVerifier and SignMessage
+var ecdsaSupportedHashFuncs = []crypto.Hash{
+ crypto.SHA256,
+ crypto.SHA512,
+ crypto.SHA384,
+ crypto.SHA224,
+}
+
+// checked on VerifySignature. Supports SHA1 verification.
+var ecdsaSupportedVerifyHashFuncs = []crypto.Hash{
+ crypto.SHA256,
+ crypto.SHA512,
+ crypto.SHA384,
+ crypto.SHA224,
+ crypto.SHA1,
+}
+
+// ECDSASigner is a signature.Signer that uses an Elliptic Curve DSA algorithm
+type ECDSASigner struct {
+ hashFunc crypto.Hash
+ priv *ecdsa.PrivateKey
+}
+
+// LoadECDSASigner calculates signatures using the specified private key and hash algorithm.
+//
+// hf must not be crypto.Hash(0).
+func LoadECDSASigner(priv *ecdsa.PrivateKey, hf crypto.Hash) (*ECDSASigner, error) {
+ if priv == nil {
+ return nil, errors.New("invalid ECDSA private key specified")
+ }
+
+ if !isSupportedAlg(hf, ecdsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &ECDSASigner{
+ priv: priv,
+ hashFunc: hf,
+ }, nil
+}
+
+// SignMessage signs the provided message. If the message is provided,
+// this method will compute the digest according to the hash function specified
+// when the ECDSASigner was created.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithRand()
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (e ECDSASigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) {
+ digest, _, err := ComputeDigestForSigning(message, e.hashFunc, ecdsaSupportedHashFuncs, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ rand := selectRandFromOpts(opts...)
+
+ return ecdsa.SignASN1(rand, e.priv, digest)
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (e ECDSASigner) Public() crypto.PublicKey {
+ if e.priv == nil {
+ return nil
+ }
+
+ return e.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e ECDSASigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.Public(), nil
+}
+
+// Sign computes the signature for the specified digest. If a source of entropy is
+// given in rand, it will be used instead of the default value (rand.Reader from crypto/rand).
+//
+// If opts are specified, the hash function in opts.Hash should be the one used to compute
+// digest. If opts are not specified, the value provided when the signer was created will be used instead.
+func (e ECDSASigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
+ ecdsaOpts := []SignOption{options.WithDigest(digest), options.WithRand(rand)}
+ if opts != nil {
+ ecdsaOpts = append(ecdsaOpts, options.WithCryptoSignerOpts(opts))
+ }
+
+ return e.SignMessage(nil, ecdsaOpts...)
+}
+
+// ECDSAVerifier is a signature.Verifier that uses an Elliptic Curve DSA algorithm
+type ECDSAVerifier struct {
+ publicKey *ecdsa.PublicKey
+ hashFunc crypto.Hash
+}
+
+// LoadECDSAVerifier returns a Verifier that verifies signatures using the specified
+// ECDSA public key and hash algorithm.
+//
+// hf must not be crypto.Hash(0).
+func LoadECDSAVerifier(pub *ecdsa.PublicKey, hashFunc crypto.Hash) (*ECDSAVerifier, error) {
+ if pub == nil {
+ return nil, errors.New("invalid ECDSA public key specified")
+ }
+
+ if !isSupportedAlg(hashFunc, ecdsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &ECDSAVerifier{
+ publicKey: pub,
+ hashFunc: hashFunc,
+ }, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e ECDSAVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.publicKey, nil
+}
+
+// VerifySignature verifies the signature for the given message. Unless provided
+// in an option, the digest of the message will be computed using the hash function specified
+// when the ECDSAVerifier was created.
+//
+// This function returns nil if the verification succeeded, and an error message otherwise.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithDigest()
+//
+// All other options are ignored if specified.
+func (e ECDSAVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error {
+ digest, _, err := ComputeDigestForVerifying(message, e.hashFunc, ecdsaSupportedVerifyHashFuncs, opts...)
+ if err != nil {
+ return err
+ }
+
+ if signature == nil {
+ return errors.New("nil signature passed to VerifySignature")
+ }
+
+ sigBytes, err := io.ReadAll(signature)
+ if err != nil {
+ return fmt.Errorf("reading signature: %w", err)
+ }
+
+ if !ecdsa.VerifyASN1(e.publicKey, digest, sigBytes) {
+ return errors.New("invalid signature when validating ASN.1 encoded signature")
+ }
+
+ return nil
+}
+
+// ECDSASignerVerifier is a signature.SignerVerifier that uses an Elliptic Curve DSA algorithm
+type ECDSASignerVerifier struct {
+ *ECDSASigner
+ *ECDSAVerifier
+}
+
+// LoadECDSASignerVerifier creates a combined signer and verifier. This is a convenience object
+// that simply wraps an instance of ECDSASigner and ECDSAVerifier.
+func LoadECDSASignerVerifier(priv *ecdsa.PrivateKey, hf crypto.Hash) (*ECDSASignerVerifier, error) {
+ signer, err := LoadECDSASigner(priv, hf)
+ if err != nil {
+ return nil, fmt.Errorf("initializing signer: %w", err)
+ }
+ verifier, err := LoadECDSAVerifier(&priv.PublicKey, hf)
+ if err != nil {
+ return nil, fmt.Errorf("initializing verifier: %w", err)
+ }
+
+ return &ECDSASignerVerifier{
+ ECDSASigner: signer,
+ ECDSAVerifier: verifier,
+ }, nil
+}
+
+// NewDefaultECDSASignerVerifier creates a combined signer and verifier using ECDSA.
+//
+// This creates a new ECDSA key using the P-256 curve and uses the SHA256 hashing algorithm.
+func NewDefaultECDSASignerVerifier() (*ECDSASignerVerifier, *ecdsa.PrivateKey, error) {
+ return NewECDSASignerVerifier(elliptic.P256(), rand.Reader, crypto.SHA256)
+}
+
+// NewECDSASignerVerifier creates a combined signer and verifier using ECDSA.
+//
+// This creates a new ECDSA key using the specified elliptic curve, entropy source, and hashing function.
+func NewECDSASignerVerifier(curve elliptic.Curve, rand io.Reader, hashFunc crypto.Hash) (*ECDSASignerVerifier, *ecdsa.PrivateKey, error) {
+ priv, err := ecdsa.GenerateKey(curve, rand)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sv, err := LoadECDSASignerVerifier(priv, hashFunc)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return sv, priv, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e ECDSASignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.publicKey, nil
+}
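
A short usage sketch of the API above: a sign/verify round-trip with the default P-256/SHA256 signer-verifier, error handling abbreviated:

package main

import (
	"bytes"
	"fmt"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	// P-256 key with SHA256, as documented for NewDefaultECDSASignerVerifier.
	sv, _, err := signature.NewDefaultECDSASignerVerifier()
	if err != nil {
		panic(err)
	}

	msg := []byte("hello, world")
	sig, err := sv.SignMessage(bytes.NewReader(msg))
	if err != nil {
		panic(err)
	}

	// nil means the ASN.1-encoded signature verified.
	if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
		panic(err)
	}
	fmt.Println("verified")
}
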
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go
new file mode 100644
index 000000000..116c34c7c
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go
@@ -0,0 +1,197 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ed25519"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+)
+
+var ed25519SupportedHashFuncs = []crypto.Hash{
+ crypto.Hash(0),
+}
+
+// ED25519Signer is a signature.Signer that uses the Ed25519 public-key signature system
+type ED25519Signer struct {
+ priv ed25519.PrivateKey
+}
+
+// LoadED25519Signer calculates signatures using the specified private key.
+func LoadED25519Signer(priv ed25519.PrivateKey) (*ED25519Signer, error) {
+ if priv == nil {
+ return nil, errors.New("invalid ED25519 private key specified")
+ }
+
+ // check the key length to avoid a panic inside ed25519.Sign and return an error gracefully
+ if len(priv) != ed25519.PrivateKeySize {
+ return nil, errors.New("invalid size for ED25519 key")
+ }
+
+ return &ED25519Signer{
+ priv: priv,
+ }, nil
+}
+
+// SignMessage signs the provided message. Passing the WithDigest option is not
+// supported because Ed25519 makes two passes over the full message during the
+// signing process, so a precomputed digest cannot be used.
+//
+// All options are ignored.
+func (e ED25519Signer) SignMessage(message io.Reader, _ ...SignOption) ([]byte, error) {
+ messageBytes, _, err := ComputeDigestForSigning(message, crypto.Hash(0), ed25519SupportedHashFuncs)
+ if err != nil {
+ return nil, err
+ }
+
+ return ed25519.Sign(e.priv, messageBytes), nil
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (e ED25519Signer) Public() crypto.PublicKey {
+ if e.priv == nil {
+ return nil
+ }
+
+ return e.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e ED25519Signer) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.Public(), nil
+}
+
+// Sign computes the signature for the specified message; the first and third arguments to this
+// function are ignored as they are not used by the ED25519 algorithm.
+func (e ED25519Signer) Sign(_ io.Reader, message []byte, _ crypto.SignerOpts) ([]byte, error) {
+ if message == nil {
+ return nil, errors.New("message must not be nil")
+ }
+ return e.SignMessage(bytes.NewReader(message))
+}
+
+// ED25519Verifier is a signature.Verifier that uses the Ed25519 public-key signature system
+type ED25519Verifier struct {
+ publicKey ed25519.PublicKey
+}
+
+// LoadED25519Verifier returns a Verifier that verifies signatures using the specified ED25519 public key.
+func LoadED25519Verifier(pub ed25519.PublicKey) (*ED25519Verifier, error) {
+ if pub == nil {
+ return nil, errors.New("invalid ED25519 public key specified")
+ }
+
+ return &ED25519Verifier{
+ publicKey: pub,
+ }, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e *ED25519Verifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.publicKey, nil
+}
+
+// VerifySignature verifies the signature for the given message.
+//
+// This function returns nil if the verification succeeded, and an error message otherwise.
+//
+// All options are ignored if specified.
+func (e *ED25519Verifier) VerifySignature(signature, message io.Reader, _ ...VerifyOption) error {
+ messageBytes, _, err := ComputeDigestForVerifying(message, crypto.Hash(0), ed25519SupportedHashFuncs)
+ if err != nil {
+ return err
+ }
+
+ if signature == nil {
+ return errors.New("nil signature passed to VerifySignature")
+ }
+
+ sigBytes, err := io.ReadAll(signature)
+ if err != nil {
+ return fmt.Errorf("reading signature: %w", err)
+ }
+
+ if !ed25519.Verify(e.publicKey, messageBytes, sigBytes) {
+ return errors.New("failed to verify signature")
+ }
+ return nil
+}
+
+// ED25519SignerVerifier is a signature.SignerVerifier that uses the Ed25519 public-key signature system
+type ED25519SignerVerifier struct {
+ *ED25519Signer
+ *ED25519Verifier
+}
+
+// LoadED25519SignerVerifier creates a combined signer and verifier. This is
+// a convenience object that simply wraps an instance of ED25519Signer and ED25519Verifier.
+func LoadED25519SignerVerifier(priv ed25519.PrivateKey) (*ED25519SignerVerifier, error) {
+ signer, err := LoadED25519Signer(priv)
+ if err != nil {
+ return nil, fmt.Errorf("initializing signer: %w", err)
+ }
+ pub, ok := priv.Public().(ed25519.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf("given key is not ed25519.PublicKey: %w", err)
+ }
+ verifier, err := LoadED25519Verifier(pub)
+ if err != nil {
+ return nil, fmt.Errorf("initializing verifier: %w", err)
+ }
+
+ return &ED25519SignerVerifier{
+ ED25519Signer: signer,
+ ED25519Verifier: verifier,
+ }, nil
+}
+
+// NewDefaultED25519SignerVerifier creates a combined signer and verifier using ED25519.
+// This creates a new ED25519 key using crypto/rand as an entropy source.
+func NewDefaultED25519SignerVerifier() (*ED25519SignerVerifier, ed25519.PrivateKey, error) {
+ return NewED25519SignerVerifier(rand.Reader)
+}
+
+// NewED25519SignerVerifier creates a combined signer and verifier using ED25519.
+// This creates a new ED25519 key using the specified entropy source.
+func NewED25519SignerVerifier(rand io.Reader) (*ED25519SignerVerifier, ed25519.PrivateKey, error) {
+ _, priv, err := ed25519.GenerateKey(rand)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sv, err := LoadED25519SignerVerifier(priv)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return sv, priv, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e ED25519SignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.publicKey, nil
+}
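
The Ed25519 variant follows the same shape; a sketch (note that SignMessage consumes the whole message, since Ed25519 cannot sign a precomputed digest):

package main

import (
	"bytes"
	"fmt"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	sv, _, err := signature.NewDefaultED25519SignerVerifier()
	if err != nil {
		panic(err)
	}

	msg := []byte("hello, world")
	sig, err := sv.SignMessage(bytes.NewReader(msg))
	if err != nil {
		panic(err)
	}
	if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
		panic(err)
	}
	fmt.Println("verified")
}
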
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/message.go b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go
new file mode 100644
index 000000000..6f8449eea
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go
@@ -0,0 +1,111 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ crand "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+)
+
+func isSupportedAlg(alg crypto.Hash, supportedAlgs []crypto.Hash) bool {
+ if supportedAlgs == nil {
+ return true
+ }
+ for _, supportedAlg := range supportedAlgs {
+ if alg == supportedAlg {
+ return true
+ }
+ }
+ return false
+}
+
+// ComputeDigestForSigning calculates the digest value for the specified message using a hash function selected by the following process:
+//
+// - if a digest value is already specified in a SignOption and the length of the digest matches that of the selected hash function, the
+// digest value will be returned without any further computation
+// - if a hash function is given using WithCryptoSignerOpts(opts) as a SignOption, it will be used (if it is in the supported list)
+// - otherwise defaultHashFunc will be used (if it is in the supported list)
+func ComputeDigestForSigning(rawMessage io.Reader, defaultHashFunc crypto.Hash, supportedHashFuncs []crypto.Hash, opts ...SignOption) (digest []byte, hashedWith crypto.Hash, err error) {
+ var cryptoSignerOpts crypto.SignerOpts = defaultHashFunc
+ for _, opt := range opts {
+ opt.ApplyDigest(&digest)
+ opt.ApplyCryptoSignerOpts(&cryptoSignerOpts)
+ }
+ hashedWith = cryptoSignerOpts.HashFunc()
+ if !isSupportedAlg(hashedWith, supportedHashFuncs) {
+ return nil, crypto.Hash(0), fmt.Errorf("unsupported hash algorithm: %q not in %v", hashedWith.String(), supportedHashFuncs)
+ }
+ if len(digest) > 0 {
+ if hashedWith != crypto.Hash(0) && len(digest) != hashedWith.Size() {
+ err = errors.New("unexpected length of digest for hash function specified")
+ }
+ return
+ }
+ digest, err = hashMessage(rawMessage, hashedWith)
+ return
+}
+
+// ComputeDigestForVerifying calculates the digest value for the specified message using a hash function selected by the following process:
+//
+// - if a digest value is already specified in a SignOption and the length of the digest matches that of the selected hash function, the
+// digest value will be returned without any further computation
+// - if a hash function is given using WithCryptoSignerOpts(opts) as a SignOption, it will be used (if it is in the supported list)
+// - otherwise defaultHashFunc will be used (if it is in the supported list)
+func ComputeDigestForVerifying(rawMessage io.Reader, defaultHashFunc crypto.Hash, supportedHashFuncs []crypto.Hash, opts ...VerifyOption) (digest []byte, hashedWith crypto.Hash, err error) {
+ var cryptoSignerOpts crypto.SignerOpts = defaultHashFunc
+ for _, opt := range opts {
+ opt.ApplyDigest(&digest)
+ opt.ApplyCryptoSignerOpts(&cryptoSignerOpts)
+ }
+ hashedWith = cryptoSignerOpts.HashFunc()
+ if !isSupportedAlg(hashedWith, supportedHashFuncs) {
+ return nil, crypto.Hash(0), fmt.Errorf("unsupported hash algorithm: %q not in %v", hashedWith.String(), supportedHashFuncs)
+ }
+ if len(digest) > 0 {
+ if hashedWith != crypto.Hash(0) && len(digest) != hashedWith.Size() {
+ err = errors.New("unexpected length of digest for hash function specified")
+ }
+ return
+ }
+ digest, err = hashMessage(rawMessage, hashedWith)
+ return
+}
+
+func hashMessage(rawMessage io.Reader, hashFunc crypto.Hash) ([]byte, error) {
+ if rawMessage == nil {
+ return nil, errors.New("message cannot be nil")
+ }
+ if hashFunc == crypto.Hash(0) {
+ return io.ReadAll(rawMessage)
+ }
+ hasher := hashFunc.New()
+ // avoids reading entire message into memory
+ if _, err := io.Copy(hasher, rawMessage); err != nil {
+ return nil, fmt.Errorf("hashing message: %w", err)
+ }
+ return hasher.Sum(nil), nil
+}
+
+func selectRandFromOpts(opts ...SignOption) io.Reader {
+ rand := crand.Reader
+ for _, opt := range opts {
+ opt.ApplyRand(&rand)
+ }
+ return rand
+}
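
A sketch of the digest-selection rules implemented above: with no options the message is hashed with the default function, while WithDigest short-circuits hashing entirely (the two paths agree when the precomputed digest matches):

package main

import (
	"bytes"
	"crypto"
	"crypto/sha256"
	"fmt"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

func main() {
	msg := []byte("payload")

	// No options: hash msg with the default function (a nil list allows all algorithms).
	d1, hf, err := signature.ComputeDigestForSigning(bytes.NewReader(msg), crypto.SHA256, nil)
	if err != nil {
		panic(err)
	}

	// WithDigest: the precomputed value is returned after a length check;
	// the (nil) message is never read.
	pre := sha256.Sum256(msg)
	d2, _, err := signature.ComputeDigestForSigning(nil, crypto.SHA256, nil, options.WithDigest(pre[:]))
	if err != nil {
		panic(err)
	}

	fmt.Println(hf, bytes.Equal(d1, d2)) // SHA-256 true
}
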
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options.go
new file mode 100644
index 000000000..0be699f7e
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options.go
@@ -0,0 +1,57 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "context"
+ "crypto"
+ "io"
+
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+// RPCOption specifies options to be used when performing RPC
+type RPCOption interface {
+ ApplyContext(*context.Context)
+ ApplyRemoteVerification(*bool)
+ ApplyRPCAuthOpts(opts *options.RPCAuth)
+ ApplyKeyVersion(keyVersion *string)
+}
+
+// PublicKeyOption specifies options to be used when obtaining a public key
+type PublicKeyOption interface {
+ RPCOption
+}
+
+// MessageOption specifies options to be used when processing messages during signing or verification
+type MessageOption interface {
+ ApplyDigest(*[]byte)
+ ApplyCryptoSignerOpts(*crypto.SignerOpts)
+}
+
+// SignOption specifies options to be used when signing a message
+type SignOption interface {
+ RPCOption
+ MessageOption
+ ApplyRand(*io.Reader)
+ ApplyKeyVersionUsed(**string)
+}
+
+// VerifyOption specifies options to be used when verifying a signature
+type VerifyOption interface {
+ RPCOption
+ MessageOption
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go
new file mode 100644
index 000000000..903e6261b
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go
@@ -0,0 +1,36 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+ "context"
+)
+
+// RequestContext implements the functional option pattern for including a context during RPC
+type RequestContext struct {
+ NoOpOptionImpl
+ ctx context.Context
+}
+
+// ApplyContext sets the specified context as the functional option
+func (r RequestContext) ApplyContext(ctx *context.Context) {
+ *ctx = r.ctx
+}
+
+// WithContext specifies that the given context should be used in RPC to external services
+func WithContext(ctx context.Context) RequestContext {
+ return RequestContext{ctx: ctx}
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go
new file mode 100644
index 000000000..21875dc8c
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go
@@ -0,0 +1,35 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RequestDigest implements the functional option pattern for specifying a digest value
+type RequestDigest struct {
+ NoOpOptionImpl
+ digest []byte
+}
+
+// ApplyDigest sets the specified digest value as the functional option
+func (r RequestDigest) ApplyDigest(digest *[]byte) {
+ *digest = r.digest
+}
+
+// WithDigest specifies that the given digest can be used by underlying signature implementations
+// WARNING: When verifying a digest with ECDSA, it is trivial to craft a valid signature
+// over a random message given a public key. Do not use this unless you understand the
+// implications and do not need to protect against malleability.
+func WithDigest(digest []byte) RequestDigest {
+ return RequestDigest{digest: digest}
+}
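
Since the signers in this package also satisfy crypto.Signer, WithDigest is what carries a precomputed digest through Sign(); a sketch (mind the malleability warning above when verifying digests with ECDSA):

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	sv, _, err := signature.NewDefaultECDSASignerVerifier()
	if err != nil {
		panic(err)
	}

	// Sign wraps the digest in options.WithDigest, so no second hashing occurs.
	digest := sha256.Sum256([]byte("payload"))
	sig, err := sv.Sign(rand.Reader, digest[:], crypto.SHA256)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d-byte ASN.1 signature\n", len(sig))
}
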
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go
new file mode 100644
index 000000000..751418f9d
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go
@@ -0,0 +1,50 @@
+//
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RequestKeyVersion implements the functional option pattern for specifying the KMS key version during signing or verification
+type RequestKeyVersion struct {
+ NoOpOptionImpl
+ keyVersion string
+}
+
+// ApplyKeyVersion sets the KMS's key version as a functional option
+func (r RequestKeyVersion) ApplyKeyVersion(keyVersion *string) {
+ *keyVersion = r.keyVersion
+}
+
+// WithKeyVersion specifies that a specific KMS key version be used during signing and verification operations;
+// a value of "0" will use the latest version of the key (the default)
+func WithKeyVersion(keyVersion string) RequestKeyVersion {
+ return RequestKeyVersion{keyVersion: keyVersion}
+}
+
+// RequestKeyVersionUsed implements the functional option pattern for obtaining the KMS key version used during signing
+type RequestKeyVersionUsed struct {
+ NoOpOptionImpl
+ keyVersionUsed *string
+}
+
+// ApplyKeyVersionUsed requests to store the KMS's key version that was used as a functional option
+func (r RequestKeyVersionUsed) ApplyKeyVersionUsed(keyVersionUsed **string) {
+ *keyVersionUsed = r.keyVersionUsed
+}
+
+// ReturnKeyVersionUsed specifies that the specific KMS key version that was used during signing should be stored
+// in the pointer provided
+func ReturnKeyVersionUsed(keyVersionUsed *string) RequestKeyVersionUsed {
+ return RequestKeyVersionUsed{keyVersionUsed: keyVersionUsed}
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go
new file mode 100644
index 000000000..04e4f3b5f
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go
@@ -0,0 +1,49 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+ "context"
+ "crypto"
+ "io"
+)
+
+// NoOpOptionImpl implements the RPCOption, SignOption, VerifyOption interfaces as no-ops.
+type NoOpOptionImpl struct{}
+
+// ApplyContext is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyContext(ctx *context.Context) {}
+
+// ApplyCryptoSignerOpts is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyCryptoSignerOpts(opts *crypto.SignerOpts) {}
+
+// ApplyDigest is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyDigest(digest *[]byte) {}
+
+// ApplyRand is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyRand(rand *io.Reader) {}
+
+// ApplyRemoteVerification is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyRemoteVerification(remoteVerification *bool) {}
+
+// ApplyRPCAuthOpts is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyRPCAuthOpts(opts *RPCAuth) {}
+
+// ApplyKeyVersion is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyKeyVersion(keyVersion *string) {}
+
+// ApplyKeyVersionUsed is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyKeyVersionUsed(keyVersion **string) {}
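
The point of NoOpOptionImpl is that each concrete option overrides only the one Apply method it cares about yet still satisfies every option interface; a compile-time sketch of that property:

package main

import (
	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

// RequestDigest and RequestRand embed NoOpOptionImpl, so they satisfy
// SignOption and VerifyOption while implementing a single Apply method.
var (
	_ signature.SignOption   = options.WithDigest(nil)
	_ signature.VerifyOption = options.WithDigest(nil)
	_ signature.SignOption   = options.WithRand(nil)
)

func main() {}
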
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go
new file mode 100644
index 000000000..fd3a17f9e
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go
@@ -0,0 +1,41 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+ crand "crypto/rand"
+ "io"
+)
+
+// RequestRand implements the functional option pattern for using a specific source of entropy
+type RequestRand struct {
+ NoOpOptionImpl
+ rand io.Reader
+}
+
+// ApplyRand sets the specified source of entropy as the functional option
+func (r RequestRand) ApplyRand(rand *io.Reader) {
+ *rand = r.rand
+}
+
+// WithRand specifies that the given source of entropy should be used in signing operations
+func WithRand(rand io.Reader) RequestRand {
+ r := rand
+ if r == nil {
+ r = crand.Reader
+ }
+ return RequestRand{rand: r}
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go
new file mode 100644
index 000000000..26144adbb
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go
@@ -0,0 +1,32 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RequestRemoteVerification implements the functional option pattern for remotely verifying signatures when possible
+type RequestRemoteVerification struct {
+ NoOpOptionImpl
+ remoteVerification bool
+}
+
+// ApplyRemoteVerification sets remote verification as a functional option
+func (r RequestRemoteVerification) ApplyRemoteVerification(remoteVerification *bool) {
+ *remoteVerification = r.remoteVerification
+}
+
+// WithRemoteVerification specifies that the verification operation should be performed remotely (rather than in the caller's process)
+func WithRemoteVerification(remoteVerification bool) RequestRemoteVerification {
+ return RequestRemoteVerification{remoteVerification: remoteVerification}
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go
new file mode 100644
index 000000000..188de92dc
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go
@@ -0,0 +1,58 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RPCAuthOpts includes authentication settings for RPC calls
+type RPCAuthOpts struct {
+ NoOpOptionImpl
+ opts RPCAuth
+}
+
+// RPCAuth provides credentials for RPC calls, empty fields are ignored
+type RPCAuth struct {
+ Address string // address is the remote server address, e.g. https://vault:8200
+ Path string // path for the RPC; in Vault this is the transit path, which defaults to "transit"
+ Token string // token used for RPC, in vault this is the VAULT_TOKEN value
+ OIDC RPCAuthOIDC
+}
+
+// RPCAuthOIDC is used to perform the RPC login using OIDC instead of a fixed token
+type RPCAuthOIDC struct {
+ Path string // path defaults to "jwt" for vault
+ Role string // role is required for jwt logins
+ Token string // token is a jwt with vault
+}
+
+// ApplyRPCAuthOpts sets the RPCAuth as a function option
+func (r RPCAuthOpts) ApplyRPCAuthOpts(opts *RPCAuth) {
+ if r.opts.Address != "" {
+ opts.Address = r.opts.Address
+ }
+ if r.opts.Path != "" {
+ opts.Path = r.opts.Path
+ }
+ if r.opts.Token != "" {
+ opts.Token = r.opts.Token
+ }
+ if r.opts.OIDC.Token != "" {
+ opts.OIDC = r.opts.OIDC
+ }
+}
+
+// WithRPCAuthOpts specifies RPCAuth settings to be used with RPC logins
+func WithRPCAuthOpts(opts RPCAuth) RPCAuthOpts {
+ return RPCAuthOpts{opts: opts}
+}
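
A sketch of how ApplyRPCAuthOpts merges settings (the Vault address and token below are placeholders): only non-empty fields overwrite the target, so defaults such as the transit path survive:

package main

import (
	"fmt"

	"github.com/sigstore/sigstore/pkg/signature/options"
)

func main() {
	// Placeholder credentials for illustration only.
	opt := options.WithRPCAuthOpts(options.RPCAuth{
		Address: "https://vault:8200",
		Token:   "s.placeholder",
	})

	// A KMS client would start from its defaults and apply each RPCOption;
	// the empty Path field in the option leaves the default untouched.
	target := options.RPCAuth{Path: "transit"}
	opt.ApplyRPCAuthOpts(&target)

	fmt.Println(target.Address, target.Path) // https://vault:8200 transit
}
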
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go
new file mode 100644
index 000000000..1a3ac7394
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go
@@ -0,0 +1,40 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+ "crypto"
+)
+
+// RequestCryptoSignerOpts implements the functional option pattern for supplying crypto.SignerOpts when signing or verifying
+type RequestCryptoSignerOpts struct {
+ NoOpOptionImpl
+ opts crypto.SignerOpts
+}
+
+// ApplyCryptoSignerOpts sets crypto.SignerOpts as a functional option
+func (r RequestCryptoSignerOpts) ApplyCryptoSignerOpts(opts *crypto.SignerOpts) {
+ *opts = r.opts
+}
+
+// WithCryptoSignerOpts specifies that provided crypto.SignerOpts be used during signing and verification operations
+func WithCryptoSignerOpts(opts crypto.SignerOpts) RequestCryptoSignerOpts {
+ var optsToUse crypto.SignerOpts = crypto.SHA256
+ if opts != nil {
+ optsToUse = opts
+ }
+ return RequestCryptoSignerOpts{opts: optsToUse}
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go
new file mode 100644
index 000000000..7db2ad80c
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go
@@ -0,0 +1,104 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package payload
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/google/go-containerregistry/pkg/name"
+)
+
+// CosignSignatureType is the value of `critical.type` in a SimpleContainerImage payload.
+const CosignSignatureType = "cosign container image signature"
+
+// SimpleContainerImage describes the structure of a basic container image signature payload, as defined at:
+// https://github.com/containers/image/blob/master/docs/containers-signature.5.md#json-data-format
+type SimpleContainerImage struct {
+ Critical Critical `json:"critical"` // Critical data critical to correctly evaluating the validity of the signature
+ Optional map[string]interface{} `json:"optional"` // Optional optional metadata about the image
+}
+
+// Critical data critical to correctly evaluating the validity of a signature
+type Critical struct {
+ Identity Identity `json:"identity"` // Identity claimed identity of the image
+ Image Image `json:"image"` // Image identifies the container that the signature applies to
+ Type string `json:"type"` // Type must be CosignSignatureType ("cosign container image signature")
+}
+
+// Identity is the claimed identity of the image
+type Identity struct {
+ DockerReference string `json:"docker-reference"` // DockerReference is a reference used to refer to or download the image
+}
+
+// Image identifies the container image that the signature applies to
+type Image struct {
+ DockerManifestDigest string `json:"docker-manifest-digest"` // DockerManifestDigest the manifest digest of the signed container image
+}
+
+// Cosign describes a container image signed using Cosign
+type Cosign struct {
+ Image name.Digest
+ Annotations map[string]interface{}
+}
+
+// SimpleContainerImage returns information about a container image in the github.com/containers/image/signature format
+func (p Cosign) SimpleContainerImage() SimpleContainerImage {
+ return SimpleContainerImage{
+ Critical: Critical{
+ Identity: Identity{
+ DockerReference: p.Image.Repository.Name(),
+ },
+ Image: Image{
+ DockerManifestDigest: p.Image.DigestStr(),
+ },
+ Type: CosignSignatureType,
+ },
+ Optional: p.Annotations,
+ }
+}
+
+// MarshalJSON marshals the container signature into a []byte of JSON data
+func (p Cosign) MarshalJSON() ([]byte, error) {
+ return json.Marshal(p.SimpleContainerImage())
+}
+
+var _ json.Marshaler = Cosign{}
+
+// UnmarshalJSON unmarshals []byte of JSON data into a container signature object
+func (p *Cosign) UnmarshalJSON(data []byte) error {
+ if string(data) == "null" {
+ // JSON "null" is a no-op by convention
+ return nil
+ }
+ var simple SimpleContainerImage
+ if err := json.Unmarshal(data, &simple); err != nil {
+ return err
+ }
+ if simple.Critical.Type != CosignSignatureType {
+ return fmt.Errorf("Cosign signature payload was of an unknown type: %q", simple.Critical.Type)
+ }
+ digestStr := simple.Critical.Identity.DockerReference + "@" + simple.Critical.Image.DockerManifestDigest
+ digest, err := name.NewDigest(digestStr)
+ if err != nil {
+ return fmt.Errorf("could not parse image digest string %q: %w", digestStr, err)
+ }
+ p.Image = digest
+ p.Annotations = simple.Optional
+ return nil
+}
+
+var _ json.Unmarshaler = (*Cosign)(nil)
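
A sketch of the payload round-trip: marshalling a Cosign value produces the simple-signing JSON described above, and unmarshalling validates critical.type and re-parses the digest (the repository and digest below are made up):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/sigstore/sigstore/pkg/signature/payload"
)

func main() {
	img, err := name.NewDigest("registry.example.com/project/app@sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c")
	if err != nil {
		panic(err)
	}

	p := payload.Cosign{
		Image:       img,
		Annotations: map[string]interface{}{"env": "prod"},
	}

	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	var back payload.Cosign
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err) // would fail if critical.type were not CosignSignatureType
	}
}
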
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go
new file mode 100644
index 000000000..6f6a47a9f
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go
@@ -0,0 +1,25 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+)
+
+// PublicKeyProvider returns a PublicKey associated with a digital signature
+type PublicKeyProvider interface {
+ PublicKey(opts ...PublicKeyOption) (crypto.PublicKey, error)
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go
new file mode 100644
index 000000000..1cac68a53
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go
@@ -0,0 +1,225 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+// RSAPKCS1v15Signer is a signature.Signer that uses the RSA PKCS1v15 algorithm
+type RSAPKCS1v15Signer struct {
+ hashFunc crypto.Hash
+ priv *rsa.PrivateKey
+}
+
+// LoadRSAPKCS1v15Signer calculates signatures using the specified private key and hash algorithm.
+//
+// hf must be either SHA256, SHA384, or SHA512.
+func LoadRSAPKCS1v15Signer(priv *rsa.PrivateKey, hf crypto.Hash) (*RSAPKCS1v15Signer, error) {
+ if priv == nil {
+ return nil, errors.New("invalid RSA private key specified")
+ }
+
+ if !isSupportedAlg(hf, rsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &RSAPKCS1v15Signer{
+ priv: priv,
+ hashFunc: hf,
+ }, nil
+}
+
+// SignMessage signs the provided message using PKCS1v15. If the message is provided,
+// this method will compute the digest according to the hash function specified
+// when the RSAPKCS1v15Signer was created.
+//
+// SignMessage recognizes the following Options listed in order of preference:
+//
+// - WithRand()
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (r RSAPKCS1v15Signer) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) {
+ digest, hf, err := ComputeDigestForSigning(message, r.hashFunc, rsaSupportedHashFuncs, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ rand := selectRandFromOpts(opts...)
+
+ return rsa.SignPKCS1v15(rand, r.priv, hf, digest)
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (r RSAPKCS1v15Signer) Public() crypto.PublicKey {
+ if r.priv == nil {
+ return nil
+ }
+
+ return r.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPKCS1v15Signer) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.Public(), nil
+}
+
+// Sign computes the signature for the specified digest using PKCS1v15.
+//
+// If a source of entropy is given in rand, it will be used instead of the default value (rand.Reader
+// from crypto/rand).
+//
+// If opts are specified, they should specify the hash function used to compute digest. If opts are
+// not specified, this function assumes the hash function provided when the signer was created was
+// used to create the value specified in digest.
+func (r RSAPKCS1v15Signer) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
+ rsaOpts := []SignOption{options.WithDigest(digest), options.WithRand(rand)}
+ if opts != nil {
+ rsaOpts = append(rsaOpts, options.WithCryptoSignerOpts(opts))
+ }
+
+ return r.SignMessage(nil, rsaOpts...)
+}
+
+// RSAPKCS1v15Verifier is a signature.Verifier that uses the RSA PKCS1v15 algorithm
+type RSAPKCS1v15Verifier struct {
+ publicKey *rsa.PublicKey
+ hashFunc crypto.Hash
+}
+
+// LoadRSAPKCS1v15Verifier returns a Verifier that verifies signatures using the specified
+// RSA public key and hash algorithm.
+//
+// hf must be either SHA256, SHA384, or SHA512.
+func LoadRSAPKCS1v15Verifier(pub *rsa.PublicKey, hashFunc crypto.Hash) (*RSAPKCS1v15Verifier, error) {
+ if pub == nil {
+ return nil, errors.New("invalid RSA public key specified")
+ }
+
+ if !isSupportedAlg(hashFunc, rsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &RSAPKCS1v15Verifier{
+ publicKey: pub,
+ hashFunc: hashFunc,
+ }, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPKCS1v15Verifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.publicKey, nil
+}
+
+// VerifySignature verifies the signature for the given message using PKCS1v15. Unless provided
+// in an option, the digest of the message will be computed using the hash function specified
+// when the RSAPKCS1v15Verifier was created.
+//
+// This function returns nil if the verification succeeded, and an error message otherwise.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (r RSAPKCS1v15Verifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error {
+ digest, hf, err := ComputeDigestForVerifying(message, r.hashFunc, rsaSupportedVerifyHashFuncs, opts...)
+ if err != nil {
+ return err
+ }
+
+ if signature == nil {
+ return errors.New("nil signature passed to VerifySignature")
+ }
+
+ sigBytes, err := io.ReadAll(signature)
+ if err != nil {
+ return fmt.Errorf("reading signature: %w", err)
+ }
+
+ return rsa.VerifyPKCS1v15(r.publicKey, hf, digest, sigBytes)
+}
+
+// RSAPKCS1v15SignerVerifier is a signature.SignerVerifier that uses the RSA PKCS1v15 algorithm
+type RSAPKCS1v15SignerVerifier struct {
+ *RSAPKCS1v15Signer
+ *RSAPKCS1v15Verifier
+}
+
+// LoadRSAPKCS1v15SignerVerifier creates a combined signer and verifier. This is a convenience object
+// that simply wraps an instance of RSAPKCS1v15Signer and RSAPKCS1v15Verifier.
+func LoadRSAPKCS1v15SignerVerifier(priv *rsa.PrivateKey, hf crypto.Hash) (*RSAPKCS1v15SignerVerifier, error) {
+ signer, err := LoadRSAPKCS1v15Signer(priv, hf)
+ if err != nil {
+ return nil, fmt.Errorf("initializing signer: %w", err)
+ }
+ verifier, err := LoadRSAPKCS1v15Verifier(&priv.PublicKey, hf)
+ if err != nil {
+ return nil, fmt.Errorf("initializing verifier: %w", err)
+ }
+
+ return &RSAPKCS1v15SignerVerifier{
+ RSAPKCS1v15Signer: signer,
+ RSAPKCS1v15Verifier: verifier,
+ }, nil
+}
+
+// NewDefaultRSAPKCS1v15SignerVerifier creates a combined signer and verifier using RSA PKCS1v15.
+// This creates a new RSA key of 2048 bits and uses the SHA256 hashing algorithm.
+func NewDefaultRSAPKCS1v15SignerVerifier() (*RSAPKCS1v15SignerVerifier, *rsa.PrivateKey, error) {
+ return NewRSAPKCS1v15SignerVerifier(rand.Reader, 2048, crypto.SHA256)
+}
+
+// NewRSAPKCS1v15SignerVerifier creates a combined signer and verifier using RSA PKCS1v15.
+// This creates a new RSA key of the specified length of bits, entropy source, and hash function.
+func NewRSAPKCS1v15SignerVerifier(rand io.Reader, bits int, hashFunc crypto.Hash) (*RSAPKCS1v15SignerVerifier, *rsa.PrivateKey, error) {
+ priv, err := rsa.GenerateKey(rand, bits)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sv, err := LoadRSAPKCS1v15SignerVerifier(priv, hashFunc)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return sv, priv, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPKCS1v15SignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.publicKey, nil
+}
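
The PKCS#1 v1.5 flavor mirrors the ECDSA API; a round-trip sketch using the 2048-bit/SHA256 default:

package main

import (
	"bytes"
	"fmt"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	sv, _, err := signature.NewDefaultRSAPKCS1v15SignerVerifier()
	if err != nil {
		panic(err)
	}

	msg := []byte("hello, world")
	sig, err := sv.SignMessage(bytes.NewReader(msg))
	if err != nil {
		panic(err)
	}
	if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
		panic(err)
	}
	fmt.Println("verified")
}
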
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go
new file mode 100644
index 000000000..6e52bed9b
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go
@@ -0,0 +1,260 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+// checked on LoadSigner, LoadVerifier, and SignMessage
+var rsaSupportedHashFuncs = []crypto.Hash{
+ crypto.SHA256,
+ crypto.SHA384,
+ crypto.SHA512,
+}
+
+// checked on VerifySignature. Supports SHA1 verification.
+var rsaSupportedVerifyHashFuncs = []crypto.Hash{
+ crypto.SHA1,
+ crypto.SHA256,
+ crypto.SHA384,
+ crypto.SHA512,
+}
+
+// RSAPSSSigner is a signature.Signer that uses the RSA PSS algorithm
+type RSAPSSSigner struct {
+ hashFunc crypto.Hash
+ priv *rsa.PrivateKey
+ pssOpts *rsa.PSSOptions
+}
+
+// LoadRSAPSSSigner calculates signatures using the specified private key and hash algorithm.
+//
+// If opts are specified, then they will be stored and used as a default if not overridden
+// by the value passed to Sign().
+//
+// hf must be either SHA256, SHA384, or SHA512. opts.Hash is ignored.
+func LoadRSAPSSSigner(priv *rsa.PrivateKey, hf crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSSigner, error) {
+ if priv == nil {
+ return nil, errors.New("invalid RSA private key specified")
+ }
+
+ if !isSupportedAlg(hf, rsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &RSAPSSSigner{
+ priv: priv,
+ pssOpts: opts,
+ hashFunc: hf,
+ }, nil
+}
+
+// SignMessage signs the provided message using PSS. If the message is provided,
+// this method will compute the digest according to the hash function specified
+// when the RSAPSSSigner was created.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithRand()
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (r RSAPSSSigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) {
+ digest, hf, err := ComputeDigestForSigning(message, r.hashFunc, rsaSupportedHashFuncs, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ rand := selectRandFromOpts(opts...)
+ pssOpts := r.pssOpts
+ if pssOpts == nil {
+ pssOpts = &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ }
+ }
+ pssOpts.Hash = hf
+
+ return rsa.SignPSS(rand, r.priv, hf, digest, pssOpts)
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (r RSAPSSSigner) Public() crypto.PublicKey {
+ if r.priv == nil {
+ return nil
+ }
+
+ return r.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPSSSigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.Public(), nil
+}
+
+// Sign computes the signature for the specified digest using PSS.
+//
+// If a source of entropy is given in rand, it will be used instead of the default value (rand.Reader
+// from crypto/rand).
+//
+// If opts are specified, they must be *rsa.PSSOptions. If opts are not specified, the hash function
+// provided when the signer was created will be assumed.
+func (r RSAPSSSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
+ rsaOpts := []SignOption{options.WithDigest(digest), options.WithRand(rand)}
+ if opts != nil {
+ rsaOpts = append(rsaOpts, options.WithCryptoSignerOpts(opts))
+ }
+
+ return r.SignMessage(nil, rsaOpts...)
+}
+
+// RSAPSSVerifier is a signature.Verifier that uses the RSA PSS algorithm
+type RSAPSSVerifier struct {
+ publicKey *rsa.PublicKey
+ hashFunc crypto.Hash
+ pssOpts *rsa.PSSOptions
+}
+
+// LoadRSAPSSVerifier verifies signatures using the specified public key and hash algorithm.
+//
+// hf must be either SHA256, SHA384, or SHA512. opts.Hash is ignored.
+func LoadRSAPSSVerifier(pub *rsa.PublicKey, hashFunc crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSVerifier, error) {
+ if pub == nil {
+ return nil, errors.New("invalid RSA public key specified")
+ }
+
+ if !isSupportedAlg(hashFunc, rsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &RSAPSSVerifier{
+ publicKey: pub,
+ hashFunc: hashFunc,
+ pssOpts: opts,
+ }, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPSSVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.publicKey, nil
+}
+
+// VerifySignature verifies the signature for the given message using PSS. Unless provided
+// in an option, the digest of the message will be computed using the hash function specified
+// when the RSAPSSVerifier was created.
+//
+// This function returns nil if the verification succeeded, and an error message otherwise.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (r RSAPSSVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error {
+ digest, hf, err := ComputeDigestForVerifying(message, r.hashFunc, rsaSupportedVerifyHashFuncs, opts...)
+ if err != nil {
+ return err
+ }
+
+ if signature == nil {
+ return errors.New("nil signature passed to VerifySignature")
+ }
+
+ sigBytes, err := io.ReadAll(signature)
+ if err != nil {
+ return fmt.Errorf("reading signature: %w", err)
+ }
+
+ // rsa.VerifyPSS ignores pssOpts.Hash, so we don't set it
+ pssOpts := r.pssOpts
+ if pssOpts == nil {
+ pssOpts = &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ }
+ }
+
+ return rsa.VerifyPSS(r.publicKey, hf, digest, sigBytes, pssOpts)
+}
+
+// RSAPSSSignerVerifier is a signature.SignerVerifier that uses the RSA PSS algorithm
+type RSAPSSSignerVerifier struct {
+ *RSAPSSSigner
+ *RSAPSSVerifier
+}
+
+// LoadRSAPSSSignerVerifier creates a combined signer and verifier using RSA PSS. This is
+// a convenience object that simply wraps an instance of RSAPSSSigner and RSAPSSVerifier.
+func LoadRSAPSSSignerVerifier(priv *rsa.PrivateKey, hf crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSSignerVerifier, error) {
+ signer, err := LoadRSAPSSSigner(priv, hf, opts)
+ if err != nil {
+ return nil, fmt.Errorf("initializing signer: %w", err)
+ }
+ verifier, err := LoadRSAPSSVerifier(&priv.PublicKey, hf, opts)
+ if err != nil {
+ return nil, fmt.Errorf("initializing verifier: %w", err)
+ }
+
+ return &RSAPSSSignerVerifier{
+ RSAPSSSigner: signer,
+ RSAPSSVerifier: verifier,
+ }, nil
+}
+
+// NewDefaultRSAPSSSignerVerifier creates a combined signer and verifier using RSA PSS.
+// This creates a new RSA key of 2048 bits and uses the SHA256 hashing algorithm.
+func NewDefaultRSAPSSSignerVerifier() (*RSAPSSSignerVerifier, *rsa.PrivateKey, error) {
+ return NewRSAPSSSignerVerifier(rand.Reader, 2048, crypto.SHA256)
+}
+
+// NewRSAPSSSignerVerifier creates a combined signer and verifier using RSA PSS.
+// This creates a new RSA key of the specified bit length, using the given entropy source and hash function.
+func NewRSAPSSSignerVerifier(rand io.Reader, bits int, hashFunc crypto.Hash) (*RSAPSSSignerVerifier, *rsa.PrivateKey, error) {
+ priv, err := rsa.GenerateKey(rand, bits)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sv, err := LoadRSAPSSSignerVerifier(priv, hashFunc, &rsa.PSSOptions{Hash: hashFunc})
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return sv, priv, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPSSSignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.publicKey, nil
+}
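
A minimal usage sketch for the RSA-PSS signer/verifier above (not part of the vendored code), assuming the vendored import path github.com/sigstore/sigstore/pkg/signature; the message bytes are placeholders:

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "github.com/sigstore/sigstore/pkg/signature"
    )

    func main() {
        // Generates a fresh 2048-bit RSA key and selects SHA-256, per
        // NewDefaultRSAPSSSignerVerifier above.
        sv, _, err := signature.NewDefaultRSAPSSSignerVerifier()
        if err != nil {
            log.Fatal(err)
        }

        msg := []byte("hello, sigstore") // placeholder message
        sig, err := sv.SignMessage(bytes.NewReader(msg))
        if err != nil {
            log.Fatal(err)
        }

        // VerifySignature returns nil on success.
        if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
            log.Fatal(err)
        }
        fmt.Println("signature verified")
    }
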
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
new file mode 100644
index 000000000..6dad67d08
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
@@ -0,0 +1,89 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "errors"
+ "io"
+ "io/ioutil"
+ "path/filepath"
+
+ // these ensure we have the implementations loaded
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+
+ // these ensure we have the implementations loaded
+ _ "golang.org/x/crypto/sha3"
+)
+
+// Signer creates digital signatures over a message using a specified key pair
+type Signer interface {
+ PublicKeyProvider
+ SignMessage(message io.Reader, opts ...SignOption) ([]byte, error)
+}
+
+// SignerOpts implements crypto.SignerOpts but also allows callers to specify
+// additional options that may be utilized in signing the digest provided.
+type SignerOpts struct {
+ Hash crypto.Hash
+ Opts []SignOption
+}
+
+// HashFunc returns the hash function for this object
+func (s SignerOpts) HashFunc() crypto.Hash {
+ return s.Hash
+}
+
+// LoadSigner returns a signature.Signer based on the algorithm of the private key
+// provided.
+//
+// If privateKey is an RSA key, a RSAPKCS1v15Signer will be returned. If a
+// RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() method directly.
+func LoadSigner(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (Signer, error) {
+ switch pk := privateKey.(type) {
+ case *rsa.PrivateKey:
+ return LoadRSAPKCS1v15Signer(pk, hashFunc)
+ case *ecdsa.PrivateKey:
+ return LoadECDSASigner(pk, hashFunc)
+ case ed25519.PrivateKey:
+ return LoadED25519Signer(pk)
+ }
+ return nil, errors.New("unsupported public key type")
+}
+
+// LoadSignerFromPEMFile returns a signature.Signer based on the algorithm of the private key
+// in the file. The Signer will use the hash function specified when computing digests.
+//
+// If key is an RSA key, a RSAPKCS1v15Signer will be returned. If a
+// RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() and
+// cryptoutils.UnmarshalPEMToPrivateKey() methods directly.
+func LoadSignerFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.PassFunc) (Signer, error) {
+ fileBytes, err := ioutil.ReadFile(filepath.Clean(path))
+ if err != nil {
+ return nil, err
+ }
+ priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf)
+ if err != nil {
+ return nil, err
+ }
+ return LoadSigner(priv, hashFunc)
+}
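
A sketch of LoadSignerFromPEMFile in use (not part of the vendored code); the key path is a caller-supplied placeholder, and passing a nil cryptoutils.PassFunc assumes the key is unencrypted:

    import (
        "bytes"
        "crypto"

        "github.com/sigstore/sigstore/pkg/signature"
    )

    // signFile signs msg with the PEM-encoded private key at path.
    func signFile(path string, msg []byte) ([]byte, error) {
        // nil PassFunc: assumes an unencrypted key; supply a PassFunc
        // for encrypted PEM blocks.
        signer, err := signature.LoadSignerFromPEMFile(path, crypto.SHA256, nil)
        if err != nil {
            return nil, err
        }
        return signer.SignMessage(bytes.NewReader(msg))
    }
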
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go
new file mode 100644
index 000000000..9592654ed
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go
@@ -0,0 +1,69 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "errors"
+ "io/ioutil"
+ "path/filepath"
+
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+// SignerVerifier creates and verifies digital signatures over a message using a specified key pair
+type SignerVerifier interface {
+ Signer
+ Verifier
+}
+
+// LoadSignerVerifier returns a signature.SignerVerifier based on the algorithm of the private key
+// provided.
+//
+// If privateKey is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a
+// RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() method directly.
+func LoadSignerVerifier(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (SignerVerifier, error) {
+ switch pk := privateKey.(type) {
+ case *rsa.PrivateKey:
+ return LoadRSAPKCS1v15SignerVerifier(pk, hashFunc)
+ case *ecdsa.PrivateKey:
+ return LoadECDSASignerVerifier(pk, hashFunc)
+ case ed25519.PrivateKey:
+ return LoadED25519SignerVerifier(pk)
+ }
+ return nil, errors.New("unsupported public key type")
+}
+
+// LoadSignerVerifierFromPEMFile returns a signature.SignerVerifier based on the algorithm of the private key
+// in the file. The SignerVerifier will use the hash function specified when computing digests.
+//
+// If the private key is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a
+// RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() and
+// cryptoutils.UnmarshalPEMToPrivateKey() methods directly.
+func LoadSignerVerifierFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.PassFunc) (SignerVerifier, error) {
+ fileBytes, err := ioutil.ReadFile(filepath.Clean(path))
+ if err != nil {
+ return nil, err
+ }
+ priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf)
+ if err != nil {
+ return nil, err
+ }
+ return LoadSignerVerifier(priv, hashFunc)
+}
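
A sketch showing the algorithm dispatch in LoadSignerVerifier (not part of the vendored code): generating an ECDSA P-256 key routes to the ECDSA implementation, and the same value both signs and verifies:

    import (
        "bytes"
        "crypto"
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"

        "github.com/sigstore/sigstore/pkg/signature"
    )

    func roundTrip(msg []byte) error {
        priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            return err
        }
        // *ecdsa.PrivateKey selects the ECDSA SignerVerifier branch above.
        sv, err := signature.LoadSignerVerifier(priv, crypto.SHA256)
        if err != nil {
            return err
        }
        sig, err := sv.SignMessage(bytes.NewReader(msg))
        if err != nil {
            return err
        }
        return sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg))
    }
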
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/util.go b/vendor/github.com/sigstore/sigstore/pkg/signature/util.go
new file mode 100644
index 000000000..8852ecc4b
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/util.go
@@ -0,0 +1,55 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "github.com/google/go-containerregistry/pkg/name"
+
+ sigpayload "github.com/sigstore/sigstore/pkg/signature/payload"
+)
+
+// SignImage signs a container manifest using the specified signer object
+func SignImage(signer SignerVerifier, image name.Digest, optionalAnnotations map[string]interface{}) (payload, signature []byte, err error) {
+ imgPayload := sigpayload.Cosign{
+ Image: image,
+ Annotations: optionalAnnotations,
+ }
+ payload, err = json.Marshal(imgPayload)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to marshal payload to JSON: %w", err)
+ }
+ signature, err = signer.SignMessage(bytes.NewReader(payload))
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to sign payload: %w", err)
+ }
+ return payload, signature, nil
+}
+
+// VerifyImageSignature verifies a signature over a container manifest
+func VerifyImageSignature(signer SignerVerifier, payload, signature []byte) (image name.Digest, annotations map[string]interface{}, err error) {
+ if err := signer.VerifySignature(bytes.NewReader(signature), bytes.NewReader(payload)); err != nil {
+ return name.Digest{}, nil, fmt.Errorf("signature verification failed: %w", err)
+ }
+ var imgPayload sigpayload.Cosign
+ if err := json.Unmarshal(payload, &imgPayload); err != nil {
+ return name.Digest{}, nil, fmt.Errorf("could not deserialize image payload: %w", err)
+ }
+ return imgPayload.Image, imgPayload.Annotations, nil
+}
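
A sketch of the payload round trip above (not part of the vendored code); the registry, repository, and digest are dummies for illustration:

    import (
        "strings"

        "github.com/google/go-containerregistry/pkg/name"
        "github.com/sigstore/sigstore/pkg/signature"
    )

    func signAndCheck(sv signature.SignerVerifier) error {
        // Placeholder digest: 64 hex characters, not a real image.
        img, err := name.NewDigest("registry.example.com/app@sha256:" + strings.Repeat("a", 64))
        if err != nil {
            return err
        }
        payload, sig, err := signature.SignImage(sv, img, map[string]interface{}{"env": "dev"})
        if err != nil {
            return err
        }
        // VerifyImageSignature returns the digest and annotations that
        // were embedded in the signed payload.
        _, _, err = signature.VerifyImageSignature(sv, payload, sig)
        return err
    }
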
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
new file mode 100644
index 000000000..ea8660efc
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
@@ -0,0 +1,100 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "errors"
+ "io"
+ "io/ioutil"
+ "path/filepath"
+
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+// Verifier verifies the digital signature using a specified public key
+type Verifier interface {
+ PublicKeyProvider
+ VerifySignature(signature, message io.Reader, opts ...VerifyOption) error
+}
+
+// LoadVerifier returns a signature.Verifier based on the algorithm of the public key
+// provided that will use the hash function specified when computing digests.
+//
+// If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a
+// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly.
+func LoadVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (Verifier, error) {
+ switch pk := publicKey.(type) {
+ case *rsa.PublicKey:
+ return LoadRSAPKCS1v15Verifier(pk, hashFunc)
+ case *ecdsa.PublicKey:
+ return LoadECDSAVerifier(pk, hashFunc)
+ case ed25519.PublicKey:
+ return LoadED25519Verifier(pk)
+ }
+ return nil, errors.New("unsupported public key type")
+}
+
+// LoadUnsafeVerifier returns a signature.Verifier based on the algorithm of the public key
+// provided that will use SHA1 when computing digests for RSA and ECDSA signatures.
+//
+// If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a
+// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly.
+func LoadUnsafeVerifier(publicKey crypto.PublicKey) (Verifier, error) {
+ switch pk := publicKey.(type) {
+ case *rsa.PublicKey:
+ if pk == nil {
+ return nil, errors.New("invalid RSA public key specified")
+ }
+ return &RSAPKCS1v15Verifier{
+ publicKey: pk,
+ hashFunc: crypto.SHA1,
+ }, nil
+ case *ecdsa.PublicKey:
+ if pk == nil {
+ return nil, errors.New("invalid ECDSA public key specified")
+ }
+ return &ECDSAVerifier{
+ publicKey: pk,
+ hashFunc: crypto.SHA1,
+ }, nil
+ case ed25519.PublicKey:
+ return LoadED25519Verifier(pk)
+ }
+ return nil, errors.New("unsupported public key type")
+}
+
+// LoadVerifierFromPEMFile returns a signature.Verifier based on the contents of a
+// file located at path. The Verifier will use the hash function specified when computing digests.
+//
+// If the public key is an RSA key, a RSAPKCS1v15Verifier will be returned. If a
+// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() and cryptoutils.UnmarshalPEMToPublicKey() methods directly.
+func LoadVerifierFromPEMFile(path string, hashFunc crypto.Hash) (Verifier, error) {
+ fileBytes, err := ioutil.ReadFile(filepath.Clean(path))
+ if err != nil {
+ return nil, err
+ }
+
+ pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(fileBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return LoadVerifier(pubKey, hashFunc)
+}
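
A sketch of the Ed25519 path through LoadSigner and LoadVerifier (not part of the vendored code); the hash argument is unused on this branch since Ed25519 signs the message directly:

    import (
        "bytes"
        "crypto"
        "crypto/ed25519"
        "crypto/rand"

        "github.com/sigstore/sigstore/pkg/signature"
    )

    func ed25519RoundTrip(msg []byte) error {
        pub, priv, err := ed25519.GenerateKey(rand.Reader)
        if err != nil {
            return err
        }
        signer, err := signature.LoadSigner(priv, crypto.SHA256) // hash unused for Ed25519
        if err != nil {
            return err
        }
        sig, err := signer.SignMessage(bytes.NewReader(msg))
        if err != nil {
            return err
        }
        verifier, err := signature.LoadVerifier(pub, crypto.SHA256)
        if err != nil {
            return err
        }
        return verifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg))
    }
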
diff --git a/vendor/github.com/theupdateframework/go-tuf/LICENSE b/vendor/github.com/theupdateframework/go-tuf/LICENSE
new file mode 100644
index 000000000..38163dd4b
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Prime Directive, Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go b/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go
new file mode 100644
index 000000000..4d174d61f
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go
@@ -0,0 +1,226 @@
+// Package encrypted provides a simple, secure system for encrypting data
+// symmetrically with a passphrase.
+//
+// It uses scrypt to derive a key from the passphrase and the NaCl secret box
+// cipher for authenticated encryption.
+package encrypted
+
+import (
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+
+ "golang.org/x/crypto/nacl/secretbox"
+ "golang.org/x/crypto/scrypt"
+)
+
+const saltSize = 32
+
+const (
+ boxKeySize = 32
+ boxNonceSize = 24
+)
+
+const (
+ // N parameter was chosen to be ~100ms of work using the default implementation
+// on the 2.3GHz Core i7 Haswell processor in a late-2013 Apple Retina MacBook
+ // Pro (it takes ~113ms).
+ scryptN = 32768
+ scryptR = 8
+ scryptP = 1
+)
+
+const (
+ nameScrypt = "scrypt"
+ nameSecretBox = "nacl/secretbox"
+)
+
+type data struct {
+ KDF scryptKDF `json:"kdf"`
+ Cipher secretBoxCipher `json:"cipher"`
+ Ciphertext []byte `json:"ciphertext"`
+}
+
+type scryptParams struct {
+ N int `json:"N"`
+ R int `json:"r"`
+ P int `json:"p"`
+}
+
+func newScryptKDF() (scryptKDF, error) {
+ salt := make([]byte, saltSize)
+ if err := fillRandom(salt); err != nil {
+ return scryptKDF{}, err
+ }
+ return scryptKDF{
+ Name: nameScrypt,
+ Params: scryptParams{
+ N: scryptN,
+ R: scryptR,
+ P: scryptP,
+ },
+ Salt: salt,
+ }, nil
+}
+
+type scryptKDF struct {
+ Name string `json:"name"`
+ Params scryptParams `json:"params"`
+ Salt []byte `json:"salt"`
+}
+
+func (s *scryptKDF) Key(passphrase []byte) ([]byte, error) {
+ return scrypt.Key(passphrase, s.Salt, s.Params.N, s.Params.R, s.Params.P, boxKeySize)
+}
+
+// CheckParams checks that the encoded KDF parameters are what we expect them to
+// be. If we do not do this, an attacker could cause a DoS by tampering with
+// them.
+func (s *scryptKDF) CheckParams() error {
+ if s.Params.N != scryptN || s.Params.R != scryptR || s.Params.P != scryptP {
+ return errors.New("encrypted: unexpected kdf parameters")
+ }
+ return nil
+}
+
+func newSecretBoxCipher() (secretBoxCipher, error) {
+ nonce := make([]byte, boxNonceSize)
+ if err := fillRandom(nonce); err != nil {
+ return secretBoxCipher{}, err
+ }
+ return secretBoxCipher{
+ Name: nameSecretBox,
+ Nonce: nonce,
+ }, nil
+}
+
+type secretBoxCipher struct {
+ Name string `json:"name"`
+ Nonce []byte `json:"nonce"`
+
+ encrypted bool
+}
+
+func (s *secretBoxCipher) Encrypt(plaintext, key []byte) []byte {
+ var keyBytes [boxKeySize]byte
+ var nonceBytes [boxNonceSize]byte
+
+ if len(key) != len(keyBytes) {
+ panic("incorrect key size")
+ }
+ if len(s.Nonce) != len(nonceBytes) {
+ panic("incorrect nonce size")
+ }
+
+ copy(keyBytes[:], key)
+ copy(nonceBytes[:], s.Nonce)
+
+ // ensure that we don't re-use nonces
+ if s.encrypted {
+ panic("Encrypt must only be called once for each cipher instance")
+ }
+ s.encrypted = true
+
+ return secretbox.Seal(nil, plaintext, &nonceBytes, &keyBytes)
+}
+
+func (s *secretBoxCipher) Decrypt(ciphertext, key []byte) ([]byte, error) {
+ var keyBytes [boxKeySize]byte
+ var nonceBytes [boxNonceSize]byte
+
+ if len(key) != len(keyBytes) {
+ panic("incorrect key size")
+ }
+ if len(s.Nonce) != len(nonceBytes) {
+ // return an error instead of panicking since the nonce is user input
+ return nil, errors.New("encrypted: incorrect nonce size")
+ }
+
+ copy(keyBytes[:], key)
+ copy(nonceBytes[:], s.Nonce)
+
+ res, ok := secretbox.Open(nil, ciphertext, &nonceBytes, &keyBytes)
+ if !ok {
+ return nil, errors.New("encrypted: decryption failed")
+ }
+ return res, nil
+}
+
+// Encrypt takes a passphrase and plaintext, and returns a JSON object
+// containing ciphertext and the details necessary to decrypt it.
+func Encrypt(plaintext, passphrase []byte) ([]byte, error) {
+ k, err := newScryptKDF()
+ if err != nil {
+ return nil, err
+ }
+ key, err := k.Key(passphrase)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := newSecretBoxCipher()
+ if err != nil {
+ return nil, err
+ }
+
+ data := &data{
+ KDF: k,
+ Cipher: c,
+ }
+ data.Ciphertext = c.Encrypt(plaintext, key)
+
+ return json.Marshal(data)
+}
+
+// Marshal encrypts the JSON encoding of v using passphrase.
+func Marshal(v interface{}, passphrase []byte) ([]byte, error) {
+ data, err := json.MarshalIndent(v, "", "\t")
+ if err != nil {
+ return nil, err
+ }
+ return Encrypt(data, passphrase)
+}
+
+// Decrypt takes a JSON-encoded ciphertext object encrypted using Encrypt and
+// tries to decrypt it using passphrase. If successful, it returns the
+// plaintext.
+func Decrypt(ciphertext, passphrase []byte) ([]byte, error) {
+ data := &data{}
+ if err := json.Unmarshal(ciphertext, data); err != nil {
+ return nil, err
+ }
+
+ if data.KDF.Name != nameScrypt {
+ return nil, fmt.Errorf("encrypted: unknown kdf name %q", data.KDF.Name)
+ }
+ if data.Cipher.Name != nameSecretBox {
+ return nil, fmt.Errorf("encrypted: unknown cipher name %q", data.Cipher.Name)
+ }
+ if err := data.KDF.CheckParams(); err != nil {
+ return nil, err
+ }
+
+ key, err := data.KDF.Key(passphrase)
+ if err != nil {
+ return nil, err
+ }
+
+ return data.Cipher.Decrypt(data.Ciphertext, key)
+}
+
+// Unmarshal decrypts the data using passphrase and unmarshals the resulting
+// plaintext into the value pointed to by v.
+func Unmarshal(data []byte, v interface{}, passphrase []byte) error {
+ decrypted, err := Decrypt(data, passphrase)
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal(decrypted, v)
+}
+
+func fillRandom(b []byte) error {
+ _, err := io.ReadFull(rand.Reader, b)
+ return err
+}
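
A sketch of the Marshal/Unmarshal round trip above (not part of the vendored code); the struct and passphrase are placeholders:

    import (
        "fmt"
        "log"

        "github.com/theupdateframework/go-tuf/encrypted"
    )

    type keyFile struct {
        Name string `json:"name"`
    }

    func main() {
        pass := []byte("correct horse battery staple") // placeholder passphrase
        blob, err := encrypted.Marshal(keyFile{Name: "root"}, pass)
        if err != nil {
            log.Fatal(err)
        }

        var out keyFile
        // Unmarshal re-derives the scrypt key from the stored salt and
        // parameters, then opens the secretbox.
        if err := encrypted.Unmarshal(blob, &out, pass); err != nil {
            log.Fatal(err)
        }
        fmt.Println(out.Name) // "root"
    }
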
diff --git a/vendor/github.com/titanous/rocacheck/LICENSE b/vendor/github.com/titanous/rocacheck/LICENSE
new file mode 100644
index 000000000..7bdce481f
--- /dev/null
+++ b/vendor/github.com/titanous/rocacheck/LICENSE
@@ -0,0 +1,22 @@
+MIT License
+
+Copyright (c) 2017, Jonathan Rudenberg
+Copyright (c) 2017, CRoCS, EnigmaBridge Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/titanous/rocacheck/README.md b/vendor/github.com/titanous/rocacheck/README.md
new file mode 100644
index 000000000..b8e765ea9
--- /dev/null
+++ b/vendor/github.com/titanous/rocacheck/README.md
@@ -0,0 +1,7 @@
+# rocacheck [![GoDoc](https://godoc.org/github.com/titanous/rocacheck?status.svg)](https://godoc.org/github.com/titanous/rocacheck)
+
+Package rocacheck is a Go implementation of the [key fingerprint
+algorithm](https://github.com/crocs-muni/roca) that checks if an RSA key was
+generated by broken Infineon code and is vulnerable to factorization via the
+[Return of Coppersmith's Attack
+(ROCA)](https://crocs.fi.muni.cz/public/papers/rsa_ccs17) / CVE-2017-15361.
diff --git a/vendor/github.com/titanous/rocacheck/rocacheck.go b/vendor/github.com/titanous/rocacheck/rocacheck.go
new file mode 100644
index 000000000..e813579bb
--- /dev/null
+++ b/vendor/github.com/titanous/rocacheck/rocacheck.go
@@ -0,0 +1,52 @@
+// Package rocacheck checks if a key was generated by broken Infineon code and
+// is vulnerable to factorization via the Return of Coppersmith's Attack (ROCA)
+// / CVE-2017-15361.
+package rocacheck
+
+import (
+ "crypto/rsa"
+ "math/big"
+)
+
+type test struct {
+ Prime *big.Int
+ Fingerprints map[int64]struct{}
+}
+
+var tests = make([]test, 17)
+
+func init() {
+ bigOne := big.NewInt(1)
+ n := &big.Int{}
+ // relations table from https://github.com/crocs-muni/roca/pull/40
+ for i, r := range [][2]int64{
+ {2, 11}, {6, 13}, {8, 17}, {9, 19}, {3, 37}, {26, 53}, {20, 61},
+ {35, 71}, {24, 73}, {13, 79}, {6, 97}, {51, 103}, {53, 107},
+ {54, 109}, {42, 127}, {50, 151}, {78, 157},
+ } {
+ fps := make(map[int64]struct{})
+ bp := big.NewInt(r[1])
+ br := big.NewInt(r[0])
+ for j := int64(0); j < r[1]; j++ {
+ if n.Exp(big.NewInt(j), br, bp).Cmp(bigOne) == 0 {
+ fps[j] = struct{}{}
+ }
+ }
+ tests[i] = test{
+ Prime: big.NewInt(r[1]),
+ Fingerprints: fps,
+ }
+ }
+}
+
+// IsWeak returns true if a RSA public key is vulnerable to Return of
+// Coppersmith's Attack (ROCA).
+func IsWeak(k *rsa.PublicKey) bool {
+ tmp := &big.Int{}
+ for _, t := range tests {
+ if _, ok := t.Fingerprints[tmp.Mod(k.N, t.Prime).Int64()]; !ok {
+ return false
+ }
+ }
+ return true
+}
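
A sketch of IsWeak in use (not part of the vendored code):

    import (
        "crypto/rand"
        "crypto/rsa"
        "fmt"
        "log"

        "github.com/titanous/rocacheck"
    )

    func main() {
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            log.Fatal(err)
        }
        // Keys from Go's generator do not carry the Infineon fingerprint,
        // so this is expected to print false.
        fmt.Println(rocacheck.IsWeak(&key.PublicKey))
    }
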
diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
new file mode 100644
index 000000000..a2973e626
--- /dev/null
+++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
@@ -0,0 +1,173 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package secretbox encrypts and authenticates small messages.
+
+Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with
+secret-key cryptography. The length of messages is not hidden.
+
+It is the caller's responsibility to ensure the uniqueness of nonces—for
+example, by using nonce 1 for the first message, nonce 2 for the second
+message, etc. Nonces are long enough that randomly generated nonces have
+negligible risk of collision.
+
+Messages should be small because:
+
+1. The whole message needs to be held in memory to be processed.
+
+2. Using large messages pressures implementations on small machines to decrypt
+and process plaintext before authenticating it. This is very dangerous, and
+this API does not allow it, but a protocol that uses excessive message sizes
+might present some implementations with no other choice.
+
+3. Fixed overheads will be sufficiently amortised by messages as small as 8KB.
+
+4. Performance may be improved by working with messages that fit into data caches.
+
+Thus large amounts of data should be chunked so that each message is small.
+(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable
+chunk size.
+
+This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
+*/
+package secretbox // import "golang.org/x/crypto/nacl/secretbox"
+
+import (
+ "golang.org/x/crypto/internal/poly1305"
+ "golang.org/x/crypto/internal/subtle"
+ "golang.org/x/crypto/salsa20/salsa"
+)
+
+// Overhead is the number of bytes of overhead when boxing a message.
+const Overhead = poly1305.TagSize
+
+// setup produces a sub-key and Salsa20 counter given a nonce and key.
+func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
+ // We use XSalsa20 for encryption so first we need to generate a
+ // key and nonce with HSalsa20.
+ var hNonce [16]byte
+ copy(hNonce[:], nonce[:])
+ salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
+
+ // The final 8 bytes of the original nonce form the new nonce.
+ copy(counter[:], nonce[16:])
+}
+
+// sliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+ if total := len(in) + n; cap(in) >= total {
+ head = in[:total]
+ } else {
+ head = make([]byte, total)
+ copy(head, in)
+ }
+ tail = head[len(in):]
+ return
+}
+
+// Seal appends an encrypted and authenticated copy of message to out, which
+// must not overlap message. The key and nonce pair must be unique for each
+// distinct message and the output will be Overhead bytes longer than message.
+func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte {
+ var subKey [32]byte
+ var counter [16]byte
+ setup(&subKey, &counter, nonce, key)
+
+ // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
+ // Salsa20 works with 64-byte blocks, we also generate 32 bytes of
+ // keystream as a side effect.
+ var firstBlock [64]byte
+ salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
+
+ var poly1305Key [32]byte
+ copy(poly1305Key[:], firstBlock[:])
+
+ ret, out := sliceForAppend(out, len(message)+poly1305.TagSize)
+ if subtle.AnyOverlap(out, message) {
+ panic("nacl: invalid buffer overlap")
+ }
+
+ // We XOR up to 32 bytes of message with the keystream generated from
+ // the first block.
+ firstMessageBlock := message
+ if len(firstMessageBlock) > 32 {
+ firstMessageBlock = firstMessageBlock[:32]
+ }
+
+ tagOut := out
+ out = out[poly1305.TagSize:]
+ for i, x := range firstMessageBlock {
+ out[i] = firstBlock[32+i] ^ x
+ }
+ message = message[len(firstMessageBlock):]
+ ciphertext := out
+ out = out[len(firstMessageBlock):]
+
+ // Now encrypt the rest.
+ counter[8] = 1
+ salsa.XORKeyStream(out, message, &counter, &subKey)
+
+ var tag [poly1305.TagSize]byte
+ poly1305.Sum(&tag, ciphertext, &poly1305Key)
+ copy(tagOut, tag[:])
+
+ return ret
+}
+
+// Open authenticates and decrypts a box produced by Seal and appends the
+// message to out, which must not overlap box. The output will be Overhead
+// bytes smaller than box.
+func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
+ if len(box) < Overhead {
+ return nil, false
+ }
+
+ var subKey [32]byte
+ var counter [16]byte
+ setup(&subKey, &counter, nonce, key)
+
+ // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
+ // Salsa20 works with 64-byte blocks, we also generate 32 bytes of
+ // keystream as a side effect.
+ var firstBlock [64]byte
+ salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
+
+ var poly1305Key [32]byte
+ copy(poly1305Key[:], firstBlock[:])
+ var tag [poly1305.TagSize]byte
+ copy(tag[:], box)
+
+ if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) {
+ return nil, false
+ }
+
+ ret, out := sliceForAppend(out, len(box)-Overhead)
+ if subtle.AnyOverlap(out, box) {
+ panic("nacl: invalid buffer overlap")
+ }
+
+ // We XOR up to 32 bytes of box with the keystream generated from
+ // the first block.
+ box = box[Overhead:]
+ firstMessageBlock := box
+ if len(firstMessageBlock) > 32 {
+ firstMessageBlock = firstMessageBlock[:32]
+ }
+ for i, x := range firstMessageBlock {
+ out[i] = firstBlock[32+i] ^ x
+ }
+
+ box = box[len(firstMessageBlock):]
+ out = out[len(firstMessageBlock):]
+
+ // Now decrypt the rest.
+ counter[8] = 1
+ salsa.XORKeyStream(out, box, &counter, &subKey)
+
+ return ret, true
+}
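
A sketch of Seal/Open with a random nonce (not part of the vendored code), using the common convention of prepending the nonce to the sealed box:

    import (
        "crypto/rand"
        "fmt"
        "io"
        "log"

        "golang.org/x/crypto/nacl/secretbox"
    )

    func main() {
        var key [32]byte
        var nonce [24]byte
        if _, err := io.ReadFull(rand.Reader, key[:]); err != nil {
            log.Fatal(err)
        }
        if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
            log.Fatal(err)
        }

        // Prepending the nonce lets it travel alongside the ciphertext.
        sealed := secretbox.Seal(nonce[:], []byte("hello"), &nonce, &key)

        // To open, split the nonce back off the front.
        var openNonce [24]byte
        copy(openNonce[:], sealed[:24])
        plain, ok := secretbox.Open(nil, sealed[24:], &openNonce, &key)
        if !ok {
            log.Fatal("decryption failed")
        }
        fmt.Printf("%s\n", plain)
    }
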
diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go
new file mode 100644
index 000000000..4269ed113
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go
@@ -0,0 +1,792 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses
+// are signed messages attesting to the validity of a certificate for a small
+// period of time. This is used to manage revocation for X.509 certificates.
+package ocsp // import "golang.org/x/crypto/ocsp"
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ _ "crypto/sha1"
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "math/big"
+ "strconv"
+ "time"
+)
+
+var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1})
+
+// ResponseStatus contains the result of an OCSP request. See
+// https://tools.ietf.org/html/rfc6960#section-2.3
+type ResponseStatus int
+
+const (
+ Success ResponseStatus = 0
+ Malformed ResponseStatus = 1
+ InternalError ResponseStatus = 2
+ TryLater ResponseStatus = 3
+ // Status code four is unused in OCSP. See
+ // https://tools.ietf.org/html/rfc6960#section-4.2.1
+ SignatureRequired ResponseStatus = 5
+ Unauthorized ResponseStatus = 6
+)
+
+func (r ResponseStatus) String() string {
+ switch r {
+ case Success:
+ return "success"
+ case Malformed:
+ return "malformed"
+ case InternalError:
+ return "internal error"
+ case TryLater:
+ return "try later"
+ case SignatureRequired:
+ return "signature required"
+ case Unauthorized:
+ return "unauthorized"
+ default:
+ return "unknown OCSP status: " + strconv.Itoa(int(r))
+ }
+}
+
+// ResponseError is an error that may be returned by ParseResponse to indicate
+// that the response itself is an error, not just that it's indicating that a
+// certificate is revoked, unknown, etc.
+type ResponseError struct {
+ Status ResponseStatus
+}
+
+func (r ResponseError) Error() string {
+ return "ocsp: error from server: " + r.Status.String()
+}
+
+// These are internal structures that reflect the ASN.1 structure of an OCSP
+// response. See RFC 2560, section 4.2.
+
+type certID struct {
+ HashAlgorithm pkix.AlgorithmIdentifier
+ NameHash []byte
+ IssuerKeyHash []byte
+ SerialNumber *big.Int
+}
+
+// https://tools.ietf.org/html/rfc2560#section-4.1.1
+type ocspRequest struct {
+ TBSRequest tbsRequest
+}
+
+type tbsRequest struct {
+ Version int `asn1:"explicit,tag:0,default:0,optional"`
+ RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"`
+ RequestList []request
+}
+
+type request struct {
+ Cert certID
+}
+
+type responseASN1 struct {
+ Status asn1.Enumerated
+ Response responseBytes `asn1:"explicit,tag:0,optional"`
+}
+
+type responseBytes struct {
+ ResponseType asn1.ObjectIdentifier
+ Response []byte
+}
+
+type basicResponse struct {
+ TBSResponseData responseData
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ Signature asn1.BitString
+ Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"`
+}
+
+type responseData struct {
+ Raw asn1.RawContent
+ Version int `asn1:"optional,default:0,explicit,tag:0"`
+ RawResponderID asn1.RawValue
+ ProducedAt time.Time `asn1:"generalized"`
+ Responses []singleResponse
+}
+
+type singleResponse struct {
+ CertID certID
+ Good asn1.Flag `asn1:"tag:0,optional"`
+ Revoked revokedInfo `asn1:"tag:1,optional"`
+ Unknown asn1.Flag `asn1:"tag:2,optional"`
+ ThisUpdate time.Time `asn1:"generalized"`
+ NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"`
+ SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"`
+}
+
+type revokedInfo struct {
+ RevocationTime time.Time `asn1:"generalized"`
+ Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"`
+}
+
+var (
+ oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
+ oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
+ oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+ oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+ oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+ oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+ oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+ oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2}
+ oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+ oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+ oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+ oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+)
+
+var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{
+ crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}),
+ crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}),
+ crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}),
+ crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}),
+}
+
+// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
+var signatureAlgorithmDetails = []struct {
+ algo x509.SignatureAlgorithm
+ oid asn1.ObjectIdentifier
+ pubKeyAlgo x509.PublicKeyAlgorithm
+ hash crypto.Hash
+}{
+ {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */},
+ {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5},
+ {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
+ {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256},
+ {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384},
+ {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512},
+ {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1},
+ {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256},
+ {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1},
+ {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256},
+ {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384},
+ {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512},
+}
+
+// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
+func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) {
+ var pubType x509.PublicKeyAlgorithm
+
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ pubType = x509.RSA
+ hashFunc = crypto.SHA256
+ sigAlgo.Algorithm = oidSignatureSHA256WithRSA
+ sigAlgo.Parameters = asn1.RawValue{
+ Tag: 5,
+ }
+
+ case *ecdsa.PublicKey:
+ pubType = x509.ECDSA
+
+ switch pub.Curve {
+ case elliptic.P224(), elliptic.P256():
+ hashFunc = crypto.SHA256
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA256
+ case elliptic.P384():
+ hashFunc = crypto.SHA384
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA384
+ case elliptic.P521():
+ hashFunc = crypto.SHA512
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA512
+ default:
+ err = errors.New("x509: unknown elliptic curve")
+ }
+
+ default:
+ err = errors.New("x509: only RSA and ECDSA keys supported")
+ }
+
+ if err != nil {
+ return
+ }
+
+ if requestedSigAlgo == 0 {
+ return
+ }
+
+ found := false
+ for _, details := range signatureAlgorithmDetails {
+ if details.algo == requestedSigAlgo {
+ if details.pubKeyAlgo != pubType {
+ err = errors.New("x509: requested SignatureAlgorithm does not match private key type")
+ return
+ }
+ sigAlgo.Algorithm, hashFunc = details.oid, details.hash
+ if hashFunc == 0 {
+ err = errors.New("x509: cannot sign with hash function requested")
+ return
+ }
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ err = errors.New("x509: unknown SignatureAlgorithm")
+ }
+
+ return
+}
+
+// TODO(agl): this is taken from crypto/x509 and so should probably be exported
+// from crypto/x509 or crypto/x509/pkix.
+func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm {
+ for _, details := range signatureAlgorithmDetails {
+ if oid.Equal(details.oid) {
+ return details.algo
+ }
+ }
+ return x509.UnknownSignatureAlgorithm
+}
+
+// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form.
+func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash {
+ for hash, oid := range hashOIDs {
+ if oid.Equal(target) {
+ return hash
+ }
+ }
+ return crypto.Hash(0)
+}
+
+func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier {
+ for hash, oid := range hashOIDs {
+ if hash == target {
+ return oid
+ }
+ }
+ return nil
+}
+
+// This is the exposed reflection of the internal OCSP structures.
+
+// The status values that can be expressed in OCSP. See RFC 6960.
+const (
+ // Good means that the certificate is valid.
+ Good = iota
+ // Revoked means that the certificate has been deliberately revoked.
+ Revoked
+ // Unknown means that the OCSP responder doesn't know about the certificate.
+ Unknown
+ // ServerFailed is unused and was never used (see
+ // https://go-review.googlesource.com/#/c/18944). ParseResponse will
+ // return a ResponseError when an error response is parsed.
+ ServerFailed
+)
+
+// The enumerated reasons for revoking a certificate. See RFC 5280.
+const (
+ Unspecified = 0
+ KeyCompromise = 1
+ CACompromise = 2
+ AffiliationChanged = 3
+ Superseded = 4
+ CessationOfOperation = 5
+ CertificateHold = 6
+
+ RemoveFromCRL = 8
+ PrivilegeWithdrawn = 9
+ AACompromise = 10
+)
+
+// Request represents an OCSP request. See RFC 6960.
+type Request struct {
+ HashAlgorithm crypto.Hash
+ IssuerNameHash []byte
+ IssuerKeyHash []byte
+ SerialNumber *big.Int
+}
+
+// Marshal marshals the OCSP request to ASN.1 DER encoded form.
+func (req *Request) Marshal() ([]byte, error) {
+ hashAlg := getOIDFromHashAlgorithm(req.HashAlgorithm)
+ if hashAlg == nil {
+ return nil, errors.New("Unknown hash algorithm")
+ }
+ return asn1.Marshal(ocspRequest{
+ tbsRequest{
+ Version: 0,
+ RequestList: []request{
+ {
+ Cert: certID{
+ pkix.AlgorithmIdentifier{
+ Algorithm: hashAlg,
+ Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
+ },
+ req.IssuerNameHash,
+ req.IssuerKeyHash,
+ req.SerialNumber,
+ },
+ },
+ },
+ },
+ })
+}
+
+// Response represents an OCSP response containing a single SingleResponse. See
+// RFC 6960.
+type Response struct {
+ Raw []byte
+
+ // Status is one of {Good, Revoked, Unknown}
+ Status int
+ SerialNumber *big.Int
+ ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time
+ RevocationReason int
+ Certificate *x509.Certificate
+ // TBSResponseData contains the raw bytes of the signed response. If
+ // Certificate is nil then this can be used to verify Signature.
+ TBSResponseData []byte
+ Signature []byte
+ SignatureAlgorithm x509.SignatureAlgorithm
+
+ // IssuerHash is the hash used to compute the IssuerNameHash and IssuerKeyHash.
+ // Valid values are crypto.SHA1, crypto.SHA256, crypto.SHA384, and crypto.SHA512.
+ // If zero, the default is crypto.SHA1.
+ IssuerHash crypto.Hash
+
+ // RawResponderName optionally contains the DER-encoded subject of the
+ // responder certificate. Exactly one of RawResponderName and
+ // ResponderKeyHash is set.
+ RawResponderName []byte
+ // ResponderKeyHash optionally contains the SHA-1 hash of the
+ // responder's public key. Exactly one of RawResponderName and
+ // ResponderKeyHash is set.
+ ResponderKeyHash []byte
+
+ // Extensions contains raw X.509 extensions from the singleExtensions field
+ // of the OCSP response. When parsing certificates, this can be used to
+ // extract non-critical extensions that are not parsed by this package. When
+ // marshaling OCSP responses, the Extensions field is ignored, see
+ // ExtraExtensions.
+ Extensions []pkix.Extension
+
+ // ExtraExtensions contains extensions to be copied, raw, into any marshaled
+ // OCSP response (in the singleExtensions field). Values override any
+ // extensions that would otherwise be produced based on the other fields. The
+ // ExtraExtensions field is not populated when parsing certificates, see
+ // Extensions.
+ ExtraExtensions []pkix.Extension
+}
+
+// These are pre-serialized error responses for the various non-success codes
+// defined by OCSP. The Unauthorized code in particular can be used by an OCSP
+// responder that supports only pre-signed responses as a response to requests
+// for certificates with unknown status. See RFC 5019.
+var (
+ MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01}
+ InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02}
+ TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03}
+ SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05}
+ UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06}
+)
+
+// CheckSignatureFrom checks that the signature in resp is a valid signature
+// from issuer. This should only be used if resp.Certificate is nil. Otherwise,
+// the OCSP response contained an intermediate certificate that created the
+// signature. That signature is checked by ParseResponse and only
+// resp.Certificate remains to be validated.
+func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error {
+ return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature)
+}
+
+// ParseError results from an invalid OCSP response.
+type ParseError string
+
+func (p ParseError) Error() string {
+ return string(p)
+}
+
+// ParseRequest parses an OCSP request in DER form. It only supports
+// requests for a single certificate. Signed requests are not supported.
+// If a request includes a signature, it will result in a ParseError.
+func ParseRequest(bytes []byte) (*Request, error) {
+ var req ocspRequest
+ rest, err := asn1.Unmarshal(bytes, &req)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, ParseError("trailing data in OCSP request")
+ }
+
+ if len(req.TBSRequest.RequestList) == 0 {
+ return nil, ParseError("OCSP request contains no request body")
+ }
+ innerRequest := req.TBSRequest.RequestList[0]
+
+ hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm)
+ if hashFunc == crypto.Hash(0) {
+ return nil, ParseError("OCSP request uses unknown hash function")
+ }
+
+ return &Request{
+ HashAlgorithm: hashFunc,
+ IssuerNameHash: innerRequest.Cert.NameHash,
+ IssuerKeyHash: innerRequest.Cert.IssuerKeyHash,
+ SerialNumber: innerRequest.Cert.SerialNumber,
+ }, nil
+}
+
+// ParseResponse parses an OCSP response in DER form. The response must contain
+// only one certificate status. To parse the status of a specific certificate
+// from a response which may contain multiple statuses, use ParseResponseForCert
+// instead.
+//
+// If the response contains an embedded certificate, then that certificate will
+// be used to verify the response signature. If the response contains an
+// embedded certificate and issuer is not nil, then issuer will be used to verify
+// the signature on the embedded certificate.
+//
+// If the response does not contain an embedded certificate and issuer is not
+// nil, then issuer will be used to verify the response signature.
+//
+// Invalid responses and parse failures will result in a ParseError.
+// Error responses will result in a ResponseError.
+func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) {
+ return ParseResponseForCert(bytes, nil, issuer)
+}
+
+// ParseResponseForCert acts identically to ParseResponse, except it supports
+// parsing responses that contain multiple statuses. If the response contains
+// multiple statuses and cert is not nil, then ParseResponseForCert will return
+// the first status which contains a matching serial, otherwise it will return an
+// error. If cert is nil, then the first status in the response will be returned.
+func ParseResponseForCert(bytes []byte, cert, issuer *x509.Certificate) (*Response, error) {
+ var resp responseASN1
+ rest, err := asn1.Unmarshal(bytes, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, ParseError("trailing data in OCSP response")
+ }
+
+ if status := ResponseStatus(resp.Status); status != Success {
+ return nil, ResponseError{status}
+ }
+
+ if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) {
+ return nil, ParseError("bad OCSP response type")
+ }
+
+ var basicResp basicResponse
+ rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, ParseError("trailing data in OCSP response")
+ }
+
+ if n := len(basicResp.TBSResponseData.Responses); n == 0 || cert == nil && n > 1 {
+ return nil, ParseError("OCSP response contains bad number of responses")
+ }
+
+ var singleResp singleResponse
+ if cert == nil {
+ singleResp = basicResp.TBSResponseData.Responses[0]
+ } else {
+ match := false
+ for _, resp := range basicResp.TBSResponseData.Responses {
+ if cert.SerialNumber.Cmp(resp.CertID.SerialNumber) == 0 {
+ singleResp = resp
+ match = true
+ break
+ }
+ }
+ if !match {
+ return nil, ParseError("no response matching the supplied certificate")
+ }
+ }
+
+ ret := &Response{
+ Raw: bytes,
+ TBSResponseData: basicResp.TBSResponseData.Raw,
+ Signature: basicResp.Signature.RightAlign(),
+ SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm),
+ Extensions: singleResp.SingleExtensions,
+ SerialNumber: singleResp.CertID.SerialNumber,
+ ProducedAt: basicResp.TBSResponseData.ProducedAt,
+ ThisUpdate: singleResp.ThisUpdate,
+ NextUpdate: singleResp.NextUpdate,
+ }
+
+ // Handle the ResponderID CHOICE tag. ResponderID can be flattened into
+ // TBSResponseData once https://go-review.googlesource.com/34503 has been
+ // released.
+ rawResponderID := basicResp.TBSResponseData.RawResponderID
+ switch rawResponderID.Tag {
+ case 1: // Name
+ var rdn pkix.RDNSequence
+ if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &rdn); err != nil || len(rest) != 0 {
+ return nil, ParseError("invalid responder name")
+ }
+ ret.RawResponderName = rawResponderID.Bytes
+ case 2: // KeyHash
+ if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &ret.ResponderKeyHash); err != nil || len(rest) != 0 {
+ return nil, ParseError("invalid responder key hash")
+ }
+ default:
+ return nil, ParseError("invalid responder id tag")
+ }
+
+ if len(basicResp.Certificates) > 0 {
+ // Responders should only send a single certificate (if they
+ // send any) that connects the responder's certificate to the
+ // original issuer. We accept responses with multiple
+ // certificates due to a number of responders sending them[1], but
+ // ignore all but the first.
+ //
+ // [1] https://github.com/golang/go/issues/21527
+ ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := ret.CheckSignatureFrom(ret.Certificate); err != nil {
+ return nil, ParseError("bad signature on embedded certificate: " + err.Error())
+ }
+
+ if issuer != nil {
+ if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil {
+ return nil, ParseError("bad OCSP signature: " + err.Error())
+ }
+ }
+ } else if issuer != nil {
+ if err := ret.CheckSignatureFrom(issuer); err != nil {
+ return nil, ParseError("bad OCSP signature: " + err.Error())
+ }
+ }
+
+ for _, ext := range singleResp.SingleExtensions {
+ if ext.Critical {
+ return nil, ParseError("unsupported critical extension")
+ }
+ }
+
+ for h, oid := range hashOIDs {
+ if singleResp.CertID.HashAlgorithm.Algorithm.Equal(oid) {
+ ret.IssuerHash = h
+ break
+ }
+ }
+ if ret.IssuerHash == 0 {
+ return nil, ParseError("unsupported issuer hash algorithm")
+ }
+
+ switch {
+ case bool(singleResp.Good):
+ ret.Status = Good
+ case bool(singleResp.Unknown):
+ ret.Status = Unknown
+ default:
+ ret.Status = Revoked
+ ret.RevokedAt = singleResp.Revoked.RevocationTime
+ ret.RevocationReason = int(singleResp.Revoked.Reason)
+ }
+
+ return ret, nil
+}
+
+// RequestOptions contains options for constructing OCSP requests.
+type RequestOptions struct {
+ // Hash contains the hash function that should be used when
+ // constructing the OCSP request. If zero, SHA-1 will be used.
+ Hash crypto.Hash
+}
+
+func (opts *RequestOptions) hash() crypto.Hash {
+ if opts == nil || opts.Hash == 0 {
+ // SHA-1 is nearly universally used in OCSP.
+ return crypto.SHA1
+ }
+ return opts.Hash
+}
+
+// CreateRequest returns a DER-encoded OCSP request for the status of cert. If
+// opts is nil then sensible defaults are used.
+func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) {
+ hashFunc := opts.hash()
+
+ // OCSP seems to be the only place where these raw hash identifiers are
+ // used. I took the following from
+ // http://msdn.microsoft.com/en-us/library/ff635603.aspx
+ _, ok := hashOIDs[hashFunc]
+ if !ok {
+ return nil, x509.ErrUnsupportedAlgorithm
+ }
+
+ if !hashFunc.Available() {
+ return nil, x509.ErrUnsupportedAlgorithm
+ }
+ h := opts.hash().New()
+
+ var publicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+ }
+ if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
+ return nil, err
+ }
+
+ h.Write(publicKeyInfo.PublicKey.RightAlign())
+ issuerKeyHash := h.Sum(nil)
+
+ h.Reset()
+ h.Write(issuer.RawSubject)
+ issuerNameHash := h.Sum(nil)
+
+ req := &Request{
+ HashAlgorithm: hashFunc,
+ IssuerNameHash: issuerNameHash,
+ IssuerKeyHash: issuerKeyHash,
+ SerialNumber: cert.SerialNumber,
+ }
+ return req.Marshal()
+}
+
+// CreateResponse returns a DER-encoded OCSP response with the specified contents.
+// The fields in the response are populated as follows:
+//
+// The responder cert is used to populate the responder's name field, and the
+// certificate itself is provided alongside the OCSP response signature.
+//
+// The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields.
+//
+// The template is used to populate the SerialNumber, Status, RevokedAt,
+// RevocationReason, ThisUpdate, and NextUpdate fields.
+//
+// If template.IssuerHash is not set, SHA1 will be used.
+//
+// The ProducedAt date is automatically set to the current date, to the nearest minute.
+func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) {
+ var publicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+ }
+ if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
+ return nil, err
+ }
+
+ if template.IssuerHash == 0 {
+ template.IssuerHash = crypto.SHA1
+ }
+ hashOID := getOIDFromHashAlgorithm(template.IssuerHash)
+ if hashOID == nil {
+ return nil, errors.New("unsupported issuer hash algorithm")
+ }
+
+ if !template.IssuerHash.Available() {
+ return nil, fmt.Errorf("issuer hash algorithm %v not linked into binary", template.IssuerHash)
+ }
+ h := template.IssuerHash.New()
+ h.Write(publicKeyInfo.PublicKey.RightAlign())
+ issuerKeyHash := h.Sum(nil)
+
+ h.Reset()
+ h.Write(issuer.RawSubject)
+ issuerNameHash := h.Sum(nil)
+
+ innerResponse := singleResponse{
+ CertID: certID{
+ HashAlgorithm: pkix.AlgorithmIdentifier{
+ Algorithm: hashOID,
+ Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
+ },
+ NameHash: issuerNameHash,
+ IssuerKeyHash: issuerKeyHash,
+ SerialNumber: template.SerialNumber,
+ },
+ ThisUpdate: template.ThisUpdate.UTC(),
+ NextUpdate: template.NextUpdate.UTC(),
+ SingleExtensions: template.ExtraExtensions,
+ }
+
+ switch template.Status {
+ case Good:
+ innerResponse.Good = true
+ case Unknown:
+ innerResponse.Unknown = true
+ case Revoked:
+ innerResponse.Revoked = revokedInfo{
+ RevocationTime: template.RevokedAt.UTC(),
+ Reason: asn1.Enumerated(template.RevocationReason),
+ }
+ }
+
+ rawResponderID := asn1.RawValue{
+ Class: 2, // context-specific
+ Tag: 1, // Name (explicit tag)
+ IsCompound: true,
+ Bytes: responderCert.RawSubject,
+ }
+ tbsResponseData := responseData{
+ Version: 0,
+ RawResponderID: rawResponderID,
+ ProducedAt: time.Now().Truncate(time.Minute).UTC(),
+ Responses: []singleResponse{innerResponse},
+ }
+
+ tbsResponseDataDER, err := asn1.Marshal(tbsResponseData)
+ if err != nil {
+ return nil, err
+ }
+
+ hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ responseHash := hashFunc.New()
+ responseHash.Write(tbsResponseDataDER)
+ signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc)
+ if err != nil {
+ return nil, err
+ }
+
+ response := basicResponse{
+ TBSResponseData: tbsResponseData,
+ SignatureAlgorithm: signatureAlgorithm,
+ Signature: asn1.BitString{
+ Bytes: signature,
+ BitLength: 8 * len(signature),
+ },
+ }
+ if template.Certificate != nil {
+ response.Certificates = []asn1.RawValue{
+ {FullBytes: template.Certificate.Raw},
+ }
+ }
+ responseDER, err := asn1.Marshal(response)
+ if err != nil {
+ return nil, err
+ }
+
+ return asn1.Marshal(responseASN1{
+ Status: asn1.Enumerated(Success),
+ Response: responseBytes{
+ ResponseType: idPKIXOCSPBasic,
+ Response: responseDER,
+ },
+ })
+}
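
A sketch of the client side of this package (not part of the vendored code): building a request for a certificate and interpreting the responder's reply. Transporting the request, typically an HTTP POST to a URL from cert.OCSPServer, is out of scope here:

    import (
        "crypto"
        "crypto/x509"
        "fmt"

        "golang.org/x/crypto/ocsp"
    )

    // newOCSPRequest builds a DER-encoded request for cert.
    func newOCSPRequest(cert, issuer *x509.Certificate) ([]byte, error) {
        return ocsp.CreateRequest(cert, issuer, &ocsp.RequestOptions{Hash: crypto.SHA256})
    }

    // checkStatus interprets the responder's DER reply for cert.
    func checkStatus(respDER []byte, cert, issuer *x509.Certificate) error {
        resp, err := ocsp.ParseResponseForCert(respDER, cert, issuer)
        if err != nil {
            return err
        }
        if resp.Status == ocsp.Revoked {
            return fmt.Errorf("certificate revoked at %v (reason %d)",
                resp.RevokedAt, resp.RevocationReason)
        }
        return nil
    }
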
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
new file mode 100644
index 000000000..4c96147c8
--- /dev/null
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
@@ -0,0 +1,144 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package salsa provides low-level access to functions in the Salsa family.
+package salsa // import "golang.org/x/crypto/salsa20/salsa"
+
+// Sigma is the Salsa20 constant for 256-bit keys.
+var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
+
+// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte
+// key k, and 16-byte constant c, and puts the result into the 32-byte array
+// out.
+func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
+ x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
+ x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
+ x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
+ x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
+ x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
+ x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
+ x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
+ x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
+ x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
+ x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
+ x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
+ x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
+ x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
+ x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
+ x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
+
+ for i := 0; i < 20; i += 2 {
+ u := x0 + x12
+ x4 ^= u<<7 | u>>(32-7)
+ u = x4 + x0
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x4
+ x12 ^= u<<13 | u>>(32-13)
+ u = x12 + x8
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x1
+ x9 ^= u<<7 | u>>(32-7)
+ u = x9 + x5
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x9
+ x1 ^= u<<13 | u>>(32-13)
+ u = x1 + x13
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x6
+ x14 ^= u<<7 | u>>(32-7)
+ u = x14 + x10
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x14
+ x6 ^= u<<13 | u>>(32-13)
+ u = x6 + x2
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x11
+ x3 ^= u<<7 | u>>(32-7)
+ u = x3 + x15
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x3
+ x11 ^= u<<13 | u>>(32-13)
+ u = x11 + x7
+ x15 ^= u<<18 | u>>(32-18)
+
+ u = x0 + x3
+ x1 ^= u<<7 | u>>(32-7)
+ u = x1 + x0
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x1
+ x3 ^= u<<13 | u>>(32-13)
+ u = x3 + x2
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x4
+ x6 ^= u<<7 | u>>(32-7)
+ u = x6 + x5
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x6
+ x4 ^= u<<13 | u>>(32-13)
+ u = x4 + x7
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x9
+ x11 ^= u<<7 | u>>(32-7)
+ u = x11 + x10
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x11
+ x9 ^= u<<13 | u>>(32-13)
+ u = x9 + x8
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x14
+ x12 ^= u<<7 | u>>(32-7)
+ u = x12 + x15
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x12
+ x14 ^= u<<13 | u>>(32-13)
+ u = x14 + x13
+ x15 ^= u<<18 | u>>(32-18)
+ }
+ out[0] = byte(x0)
+ out[1] = byte(x0 >> 8)
+ out[2] = byte(x0 >> 16)
+ out[3] = byte(x0 >> 24)
+
+ out[4] = byte(x5)
+ out[5] = byte(x5 >> 8)
+ out[6] = byte(x5 >> 16)
+ out[7] = byte(x5 >> 24)
+
+ out[8] = byte(x10)
+ out[9] = byte(x10 >> 8)
+ out[10] = byte(x10 >> 16)
+ out[11] = byte(x10 >> 24)
+
+ out[12] = byte(x15)
+ out[13] = byte(x15 >> 8)
+ out[14] = byte(x15 >> 16)
+ out[15] = byte(x15 >> 24)
+
+ out[16] = byte(x6)
+ out[17] = byte(x6 >> 8)
+ out[18] = byte(x6 >> 16)
+ out[19] = byte(x6 >> 24)
+
+ out[20] = byte(x7)
+ out[21] = byte(x7 >> 8)
+ out[22] = byte(x7 >> 16)
+ out[23] = byte(x7 >> 24)
+
+ out[24] = byte(x8)
+ out[25] = byte(x8 >> 8)
+ out[26] = byte(x8 >> 16)
+ out[27] = byte(x8 >> 24)
+
+ out[28] = byte(x9)
+ out[29] = byte(x9 >> 8)
+ out[30] = byte(x9 >> 16)
+ out[31] = byte(x9 >> 24)
+}
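
Illustrative use of HSalsa20, mirroring how XSalsa20 derives a subkey from a 24-byte nonce (the zero-valued key and nonce below are placeholders; real callers must supply secret material):

    var key [32]byte   // placeholder secret key
    var nonce [24]byte // placeholder 24-byte nonce
    var hNonce [16]byte
    copy(hNonce[:], nonce[:16])
    var subKey [32]byte
    // Derive a fresh 32-byte subkey from the first 16 nonce bytes.
    salsa.HSalsa20(&subKey, &hNonce, &key, &salsa.Sigma)
    // subKey is then used with nonce[16:24] as the Salsa20 nonce.
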
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
new file mode 100644
index 000000000..9bfc0927c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
@@ -0,0 +1,199 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package salsa
+
+// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
+// the result into the 64-byte array out. The input and output may be the same array.
+func Core208(out *[64]byte, in *[64]byte) {
+ j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
+ j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
+ j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
+ j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24
+ j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24
+ j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24
+ j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24
+ j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24
+ j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24
+ j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24
+ j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24
+ j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24
+ j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24
+ j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24
+ j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24
+
+ x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
+ x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
+
+ for i := 0; i < 8; i += 2 {
+ u := x0 + x12
+ x4 ^= u<<7 | u>>(32-7)
+ u = x4 + x0
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x4
+ x12 ^= u<<13 | u>>(32-13)
+ u = x12 + x8
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x1
+ x9 ^= u<<7 | u>>(32-7)
+ u = x9 + x5
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x9
+ x1 ^= u<<13 | u>>(32-13)
+ u = x1 + x13
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x6
+ x14 ^= u<<7 | u>>(32-7)
+ u = x14 + x10
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x14
+ x6 ^= u<<13 | u>>(32-13)
+ u = x6 + x2
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x11
+ x3 ^= u<<7 | u>>(32-7)
+ u = x3 + x15
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x3
+ x11 ^= u<<13 | u>>(32-13)
+ u = x11 + x7
+ x15 ^= u<<18 | u>>(32-18)
+
+ u = x0 + x3
+ x1 ^= u<<7 | u>>(32-7)
+ u = x1 + x0
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x1
+ x3 ^= u<<13 | u>>(32-13)
+ u = x3 + x2
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x4
+ x6 ^= u<<7 | u>>(32-7)
+ u = x6 + x5
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x6
+ x4 ^= u<<13 | u>>(32-13)
+ u = x4 + x7
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x9
+ x11 ^= u<<7 | u>>(32-7)
+ u = x11 + x10
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x11
+ x9 ^= u<<13 | u>>(32-13)
+ u = x9 + x8
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x14
+ x12 ^= u<<7 | u>>(32-7)
+ u = x12 + x15
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x12
+ x14 ^= u<<13 | u>>(32-13)
+ u = x14 + x13
+ x15 ^= u<<18 | u>>(32-18)
+ }
+ x0 += j0
+ x1 += j1
+ x2 += j2
+ x3 += j3
+ x4 += j4
+ x5 += j5
+ x6 += j6
+ x7 += j7
+ x8 += j8
+ x9 += j9
+ x10 += j10
+ x11 += j11
+ x12 += j12
+ x13 += j13
+ x14 += j14
+ x15 += j15
+
+ out[0] = byte(x0)
+ out[1] = byte(x0 >> 8)
+ out[2] = byte(x0 >> 16)
+ out[3] = byte(x0 >> 24)
+
+ out[4] = byte(x1)
+ out[5] = byte(x1 >> 8)
+ out[6] = byte(x1 >> 16)
+ out[7] = byte(x1 >> 24)
+
+ out[8] = byte(x2)
+ out[9] = byte(x2 >> 8)
+ out[10] = byte(x2 >> 16)
+ out[11] = byte(x2 >> 24)
+
+ out[12] = byte(x3)
+ out[13] = byte(x3 >> 8)
+ out[14] = byte(x3 >> 16)
+ out[15] = byte(x3 >> 24)
+
+ out[16] = byte(x4)
+ out[17] = byte(x4 >> 8)
+ out[18] = byte(x4 >> 16)
+ out[19] = byte(x4 >> 24)
+
+ out[20] = byte(x5)
+ out[21] = byte(x5 >> 8)
+ out[22] = byte(x5 >> 16)
+ out[23] = byte(x5 >> 24)
+
+ out[24] = byte(x6)
+ out[25] = byte(x6 >> 8)
+ out[26] = byte(x6 >> 16)
+ out[27] = byte(x6 >> 24)
+
+ out[28] = byte(x7)
+ out[29] = byte(x7 >> 8)
+ out[30] = byte(x7 >> 16)
+ out[31] = byte(x7 >> 24)
+
+ out[32] = byte(x8)
+ out[33] = byte(x8 >> 8)
+ out[34] = byte(x8 >> 16)
+ out[35] = byte(x8 >> 24)
+
+ out[36] = byte(x9)
+ out[37] = byte(x9 >> 8)
+ out[38] = byte(x9 >> 16)
+ out[39] = byte(x9 >> 24)
+
+ out[40] = byte(x10)
+ out[41] = byte(x10 >> 8)
+ out[42] = byte(x10 >> 16)
+ out[43] = byte(x10 >> 24)
+
+ out[44] = byte(x11)
+ out[45] = byte(x11 >> 8)
+ out[46] = byte(x11 >> 16)
+ out[47] = byte(x11 >> 24)
+
+ out[48] = byte(x12)
+ out[49] = byte(x12 >> 8)
+ out[50] = byte(x12 >> 16)
+ out[51] = byte(x12 >> 24)
+
+ out[52] = byte(x13)
+ out[53] = byte(x13 >> 8)
+ out[54] = byte(x13 >> 16)
+ out[55] = byte(x13 >> 24)
+
+ out[56] = byte(x14)
+ out[57] = byte(x14 >> 8)
+ out[58] = byte(x14 >> 16)
+ out[59] = byte(x14 >> 24)
+
+ out[60] = byte(x15)
+ out[61] = byte(x15 >> 8)
+ out[62] = byte(x15 >> 16)
+ out[63] = byte(x15 >> 24)
+}
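
Minimal sketch of calling Core208; per the doc comment, in-place operation is allowed, which is how scrypt's BlockMix uses this core:

    var block [64]byte // placeholder 64-byte input block
    salsa.Core208(&block, &block) // in and out may be the same array
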
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
new file mode 100644
index 000000000..c400dfcf7
--- /dev/null
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
@@ -0,0 +1,24 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 && !purego && gc
+// +build amd64,!purego,gc
+
+package salsa
+
+//go:noescape
+
+// salsa2020XORKeyStream is implemented in salsa20_amd64.s.
+func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
+
+// XORKeyStream crypts bytes from in to out using the given key and counters.
+// In and out must overlap entirely or not at all. Counter
+// contains the raw salsa20 counter bytes (both nonce and block counter).
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
+ if len(in) == 0 {
+ return
+ }
+ _ = out[len(in)-1]
+ salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0])
+}
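
Illustrative call to XORKeyStream (placeholder key and counter; the first 8 counter bytes carry the nonce and the last 8 the little-endian block counter). Because the cipher XORs a keystream, applying it again with the same key and counter decrypts:

    var key [32]byte     // placeholder secret key
    var counter [16]byte // nonce in counter[0:8], block counter in counter[8:16]
    msg := []byte("attack at dawn")
    ct := make([]byte, len(msg))
    salsa.XORKeyStream(ct, msg, &counter, &key)
    pt := make([]byte, len(ct))
    salsa.XORKeyStream(pt, ct, &counter, &key) // pt now equals msg
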
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
new file mode 100644
index 000000000..c08927720
--- /dev/null
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
@@ -0,0 +1,881 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 && !purego && gc
+// +build amd64,!purego,gc
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
+
+// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
+// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size.
+TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment
+ MOVQ out+0(FP),DI
+ MOVQ in+8(FP),SI
+ MOVQ n+16(FP),DX
+ MOVQ nonce+24(FP),CX
+ MOVQ key+32(FP),R8
+
+ MOVQ SP,R12
+ ADDQ $31, R12
+ ANDQ $~31, R12
+
+ MOVQ DX,R9
+ MOVQ CX,DX
+ MOVQ R8,R10
+ CMPQ R9,$0
+ JBE DONE
+ START:
+ MOVL 20(R10),CX
+ MOVL 0(R10),R8
+ MOVL 0(DX),AX
+ MOVL 16(R10),R11
+ MOVL CX,0(R12)
+ MOVL R8, 4 (R12)
+ MOVL AX, 8 (R12)
+ MOVL R11, 12 (R12)
+ MOVL 8(DX),CX
+ MOVL 24(R10),R8
+ MOVL 4(R10),AX
+ MOVL 4(DX),R11
+ MOVL CX,16(R12)
+ MOVL R8, 20 (R12)
+ MOVL AX, 24 (R12)
+ MOVL R11, 28 (R12)
+ MOVL 12(DX),CX
+ MOVL 12(R10),DX
+ MOVL 28(R10),R8
+ MOVL 8(R10),AX
+ MOVL DX,32(R12)
+ MOVL CX, 36 (R12)
+ MOVL R8, 40 (R12)
+ MOVL AX, 44 (R12)
+ MOVQ $1634760805,DX
+ MOVQ $857760878,CX
+ MOVQ $2036477234,R8
+ MOVQ $1797285236,AX
+ MOVL DX,48(R12)
+ MOVL CX, 52 (R12)
+ MOVL R8, 56 (R12)
+ MOVL AX, 60 (R12)
+ CMPQ R9,$256
+ JB BYTESBETWEEN1AND255
+ MOVOA 48(R12),X0
+ PSHUFL $0X55,X0,X1
+ PSHUFL $0XAA,X0,X2
+ PSHUFL $0XFF,X0,X3
+ PSHUFL $0X00,X0,X0
+ MOVOA X1,64(R12)
+ MOVOA X2,80(R12)
+ MOVOA X3,96(R12)
+ MOVOA X0,112(R12)
+ MOVOA 0(R12),X0
+ PSHUFL $0XAA,X0,X1
+ PSHUFL $0XFF,X0,X2
+ PSHUFL $0X00,X0,X3
+ PSHUFL $0X55,X0,X0
+ MOVOA X1,128(R12)
+ MOVOA X2,144(R12)
+ MOVOA X3,160(R12)
+ MOVOA X0,176(R12)
+ MOVOA 16(R12),X0
+ PSHUFL $0XFF,X0,X1
+ PSHUFL $0X55,X0,X2
+ PSHUFL $0XAA,X0,X0
+ MOVOA X1,192(R12)
+ MOVOA X2,208(R12)
+ MOVOA X0,224(R12)
+ MOVOA 32(R12),X0
+ PSHUFL $0X00,X0,X1
+ PSHUFL $0XAA,X0,X2
+ PSHUFL $0XFF,X0,X0
+ MOVOA X1,240(R12)
+ MOVOA X2,256(R12)
+ MOVOA X0,272(R12)
+ BYTESATLEAST256:
+ MOVL 16(R12),DX
+ MOVL 36 (R12),CX
+ MOVL DX,288(R12)
+ MOVL CX,304(R12)
+ SHLQ $32,CX
+ ADDQ CX,DX
+ ADDQ $1,DX
+ MOVQ DX,CX
+ SHRQ $32,CX
+ MOVL DX, 292 (R12)
+ MOVL CX, 308 (R12)
+ ADDQ $1,DX
+ MOVQ DX,CX
+ SHRQ $32,CX
+ MOVL DX, 296 (R12)
+ MOVL CX, 312 (R12)
+ ADDQ $1,DX
+ MOVQ DX,CX
+ SHRQ $32,CX
+ MOVL DX, 300 (R12)
+ MOVL CX, 316 (R12)
+ ADDQ $1,DX
+ MOVQ DX,CX
+ SHRQ $32,CX
+ MOVL DX,16(R12)
+ MOVL CX, 36 (R12)
+ MOVQ R9,352(R12)
+ MOVQ $20,DX
+ MOVOA 64(R12),X0
+ MOVOA 80(R12),X1
+ MOVOA 96(R12),X2
+ MOVOA 256(R12),X3
+ MOVOA 272(R12),X4
+ MOVOA 128(R12),X5
+ MOVOA 144(R12),X6
+ MOVOA 176(R12),X7
+ MOVOA 192(R12),X8
+ MOVOA 208(R12),X9
+ MOVOA 224(R12),X10
+ MOVOA 304(R12),X11
+ MOVOA 112(R12),X12
+ MOVOA 160(R12),X13
+ MOVOA 240(R12),X14
+ MOVOA 288(R12),X15
+ MAINLOOP1:
+ MOVOA X1,320(R12)
+ MOVOA X2,336(R12)
+ MOVOA X13,X1
+ PADDL X12,X1
+ MOVOA X1,X2
+ PSLLL $7,X1
+ PXOR X1,X14
+ PSRLL $25,X2
+ PXOR X2,X14
+ MOVOA X7,X1
+ PADDL X0,X1
+ MOVOA X1,X2
+ PSLLL $7,X1
+ PXOR X1,X11
+ PSRLL $25,X2
+ PXOR X2,X11
+ MOVOA X12,X1
+ PADDL X14,X1
+ MOVOA X1,X2
+ PSLLL $9,X1
+ PXOR X1,X15
+ PSRLL $23,X2
+ PXOR X2,X15
+ MOVOA X0,X1
+ PADDL X11,X1
+ MOVOA X1,X2
+ PSLLL $9,X1
+ PXOR X1,X9
+ PSRLL $23,X2
+ PXOR X2,X9
+ MOVOA X14,X1
+ PADDL X15,X1
+ MOVOA X1,X2
+ PSLLL $13,X1
+ PXOR X1,X13
+ PSRLL $19,X2
+ PXOR X2,X13
+ MOVOA X11,X1
+ PADDL X9,X1
+ MOVOA X1,X2
+ PSLLL $13,X1
+ PXOR X1,X7
+ PSRLL $19,X2
+ PXOR X2,X7
+ MOVOA X15,X1
+ PADDL X13,X1
+ MOVOA X1,X2
+ PSLLL $18,X1
+ PXOR X1,X12
+ PSRLL $14,X2
+ PXOR X2,X12
+ MOVOA 320(R12),X1
+ MOVOA X12,320(R12)
+ MOVOA X9,X2
+ PADDL X7,X2
+ MOVOA X2,X12
+ PSLLL $18,X2
+ PXOR X2,X0
+ PSRLL $14,X12
+ PXOR X12,X0
+ MOVOA X5,X2
+ PADDL X1,X2
+ MOVOA X2,X12
+ PSLLL $7,X2
+ PXOR X2,X3
+ PSRLL $25,X12
+ PXOR X12,X3
+ MOVOA 336(R12),X2
+ MOVOA X0,336(R12)
+ MOVOA X6,X0
+ PADDL X2,X0
+ MOVOA X0,X12
+ PSLLL $7,X0
+ PXOR X0,X4
+ PSRLL $25,X12
+ PXOR X12,X4
+ MOVOA X1,X0
+ PADDL X3,X0
+ MOVOA X0,X12
+ PSLLL $9,X0
+ PXOR X0,X10
+ PSRLL $23,X12
+ PXOR X12,X10
+ MOVOA X2,X0
+ PADDL X4,X0
+ MOVOA X0,X12
+ PSLLL $9,X0
+ PXOR X0,X8
+ PSRLL $23,X12
+ PXOR X12,X8
+ MOVOA X3,X0
+ PADDL X10,X0
+ MOVOA X0,X12
+ PSLLL $13,X0
+ PXOR X0,X5
+ PSRLL $19,X12
+ PXOR X12,X5
+ MOVOA X4,X0
+ PADDL X8,X0
+ MOVOA X0,X12
+ PSLLL $13,X0
+ PXOR X0,X6
+ PSRLL $19,X12
+ PXOR X12,X6
+ MOVOA X10,X0
+ PADDL X5,X0
+ MOVOA X0,X12
+ PSLLL $18,X0
+ PXOR X0,X1
+ PSRLL $14,X12
+ PXOR X12,X1
+ MOVOA 320(R12),X0
+ MOVOA X1,320(R12)
+ MOVOA X4,X1
+ PADDL X0,X1
+ MOVOA X1,X12
+ PSLLL $7,X1
+ PXOR X1,X7
+ PSRLL $25,X12
+ PXOR X12,X7
+ MOVOA X8,X1
+ PADDL X6,X1
+ MOVOA X1,X12
+ PSLLL $18,X1
+ PXOR X1,X2
+ PSRLL $14,X12
+ PXOR X12,X2
+ MOVOA 336(R12),X12
+ MOVOA X2,336(R12)
+ MOVOA X14,X1
+ PADDL X12,X1
+ MOVOA X1,X2
+ PSLLL $7,X1
+ PXOR X1,X5
+ PSRLL $25,X2
+ PXOR X2,X5
+ MOVOA X0,X1
+ PADDL X7,X1
+ MOVOA X1,X2
+ PSLLL $9,X1
+ PXOR X1,X10
+ PSRLL $23,X2
+ PXOR X2,X10
+ MOVOA X12,X1
+ PADDL X5,X1
+ MOVOA X1,X2
+ PSLLL $9,X1
+ PXOR X1,X8
+ PSRLL $23,X2
+ PXOR X2,X8
+ MOVOA X7,X1
+ PADDL X10,X1
+ MOVOA X1,X2
+ PSLLL $13,X1
+ PXOR X1,X4
+ PSRLL $19,X2
+ PXOR X2,X4
+ MOVOA X5,X1
+ PADDL X8,X1
+ MOVOA X1,X2
+ PSLLL $13,X1
+ PXOR X1,X14
+ PSRLL $19,X2
+ PXOR X2,X14
+ MOVOA X10,X1
+ PADDL X4,X1
+ MOVOA X1,X2
+ PSLLL $18,X1
+ PXOR X1,X0
+ PSRLL $14,X2
+ PXOR X2,X0
+ MOVOA 320(R12),X1
+ MOVOA X0,320(R12)
+ MOVOA X8,X0
+ PADDL X14,X0
+ MOVOA X0,X2
+ PSLLL $18,X0
+ PXOR X0,X12
+ PSRLL $14,X2
+ PXOR X2,X12
+ MOVOA X11,X0
+ PADDL X1,X0
+ MOVOA X0,X2
+ PSLLL $7,X0
+ PXOR X0,X6
+ PSRLL $25,X2
+ PXOR X2,X6
+ MOVOA 336(R12),X2
+ MOVOA X12,336(R12)
+ MOVOA X3,X0
+ PADDL X2,X0
+ MOVOA X0,X12
+ PSLLL $7,X0
+ PXOR X0,X13
+ PSRLL $25,X12
+ PXOR X12,X13
+ MOVOA X1,X0
+ PADDL X6,X0
+ MOVOA X0,X12
+ PSLLL $9,X0
+ PXOR X0,X15
+ PSRLL $23,X12
+ PXOR X12,X15
+ MOVOA X2,X0
+ PADDL X13,X0
+ MOVOA X0,X12
+ PSLLL $9,X0
+ PXOR X0,X9
+ PSRLL $23,X12
+ PXOR X12,X9
+ MOVOA X6,X0
+ PADDL X15,X0
+ MOVOA X0,X12
+ PSLLL $13,X0
+ PXOR X0,X11
+ PSRLL $19,X12
+ PXOR X12,X11
+ MOVOA X13,X0
+ PADDL X9,X0
+ MOVOA X0,X12
+ PSLLL $13,X0
+ PXOR X0,X3
+ PSRLL $19,X12
+ PXOR X12,X3
+ MOVOA X15,X0
+ PADDL X11,X0
+ MOVOA X0,X12
+ PSLLL $18,X0
+ PXOR X0,X1
+ PSRLL $14,X12
+ PXOR X12,X1
+ MOVOA X9,X0
+ PADDL X3,X0
+ MOVOA X0,X12
+ PSLLL $18,X0
+ PXOR X0,X2
+ PSRLL $14,X12
+ PXOR X12,X2
+ MOVOA 320(R12),X12
+ MOVOA 336(R12),X0
+ SUBQ $2,DX
+ JA MAINLOOP1
+ PADDL 112(R12),X12
+ PADDL 176(R12),X7
+ PADDL 224(R12),X10
+ PADDL 272(R12),X4
+ MOVD X12,DX
+ MOVD X7,CX
+ MOVD X10,R8
+ MOVD X4,R9
+ PSHUFL $0X39,X12,X12
+ PSHUFL $0X39,X7,X7
+ PSHUFL $0X39,X10,X10
+ PSHUFL $0X39,X4,X4
+ XORL 0(SI),DX
+ XORL 4(SI),CX
+ XORL 8(SI),R8
+ XORL 12(SI),R9
+ MOVL DX,0(DI)
+ MOVL CX,4(DI)
+ MOVL R8,8(DI)
+ MOVL R9,12(DI)
+ MOVD X12,DX
+ MOVD X7,CX
+ MOVD X10,R8
+ MOVD X4,R9
+ PSHUFL $0X39,X12,X12
+ PSHUFL $0X39,X7,X7
+ PSHUFL $0X39,X10,X10
+ PSHUFL $0X39,X4,X4
+ XORL 64(SI),DX
+ XORL 68(SI),CX
+ XORL 72(SI),R8
+ XORL 76(SI),R9
+ MOVL DX,64(DI)
+ MOVL CX,68(DI)
+ MOVL R8,72(DI)
+ MOVL R9,76(DI)
+ MOVD X12,DX
+ MOVD X7,CX
+ MOVD X10,R8
+ MOVD X4,R9
+ PSHUFL $0X39,X12,X12
+ PSHUFL $0X39,X7,X7
+ PSHUFL $0X39,X10,X10
+ PSHUFL $0X39,X4,X4
+ XORL 128(SI),DX
+ XORL 132(SI),CX
+ XORL 136(SI),R8
+ XORL 140(SI),R9
+ MOVL DX,128(DI)
+ MOVL CX,132(DI)
+ MOVL R8,136(DI)
+ MOVL R9,140(DI)
+ MOVD X12,DX
+ MOVD X7,CX
+ MOVD X10,R8
+ MOVD X4,R9
+ XORL 192(SI),DX
+ XORL 196(SI),CX
+ XORL 200(SI),R8
+ XORL 204(SI),R9
+ MOVL DX,192(DI)
+ MOVL CX,196(DI)
+ MOVL R8,200(DI)
+ MOVL R9,204(DI)
+ PADDL 240(R12),X14
+ PADDL 64(R12),X0
+ PADDL 128(R12),X5
+ PADDL 192(R12),X8
+ MOVD X14,DX
+ MOVD X0,CX
+ MOVD X5,R8
+ MOVD X8,R9
+ PSHUFL $0X39,X14,X14
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X5,X5
+ PSHUFL $0X39,X8,X8
+ XORL 16(SI),DX
+ XORL 20(SI),CX
+ XORL 24(SI),R8
+ XORL 28(SI),R9
+ MOVL DX,16(DI)
+ MOVL CX,20(DI)
+ MOVL R8,24(DI)
+ MOVL R9,28(DI)
+ MOVD X14,DX
+ MOVD X0,CX
+ MOVD X5,R8
+ MOVD X8,R9
+ PSHUFL $0X39,X14,X14
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X5,X5
+ PSHUFL $0X39,X8,X8
+ XORL 80(SI),DX
+ XORL 84(SI),CX
+ XORL 88(SI),R8
+ XORL 92(SI),R9
+ MOVL DX,80(DI)
+ MOVL CX,84(DI)
+ MOVL R8,88(DI)
+ MOVL R9,92(DI)
+ MOVD X14,DX
+ MOVD X0,CX
+ MOVD X5,R8
+ MOVD X8,R9
+ PSHUFL $0X39,X14,X14
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X5,X5
+ PSHUFL $0X39,X8,X8
+ XORL 144(SI),DX
+ XORL 148(SI),CX
+ XORL 152(SI),R8
+ XORL 156(SI),R9
+ MOVL DX,144(DI)
+ MOVL CX,148(DI)
+ MOVL R8,152(DI)
+ MOVL R9,156(DI)
+ MOVD X14,DX
+ MOVD X0,CX
+ MOVD X5,R8
+ MOVD X8,R9
+ XORL 208(SI),DX
+ XORL 212(SI),CX
+ XORL 216(SI),R8
+ XORL 220(SI),R9
+ MOVL DX,208(DI)
+ MOVL CX,212(DI)
+ MOVL R8,216(DI)
+ MOVL R9,220(DI)
+ PADDL 288(R12),X15
+ PADDL 304(R12),X11
+ PADDL 80(R12),X1
+ PADDL 144(R12),X6
+ MOVD X15,DX
+ MOVD X11,CX
+ MOVD X1,R8
+ MOVD X6,R9
+ PSHUFL $0X39,X15,X15
+ PSHUFL $0X39,X11,X11
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X6,X6
+ XORL 32(SI),DX
+ XORL 36(SI),CX
+ XORL 40(SI),R8
+ XORL 44(SI),R9
+ MOVL DX,32(DI)
+ MOVL CX,36(DI)
+ MOVL R8,40(DI)
+ MOVL R9,44(DI)
+ MOVD X15,DX
+ MOVD X11,CX
+ MOVD X1,R8
+ MOVD X6,R9
+ PSHUFL $0X39,X15,X15
+ PSHUFL $0X39,X11,X11
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X6,X6
+ XORL 96(SI),DX
+ XORL 100(SI),CX
+ XORL 104(SI),R8
+ XORL 108(SI),R9
+ MOVL DX,96(DI)
+ MOVL CX,100(DI)
+ MOVL R8,104(DI)
+ MOVL R9,108(DI)
+ MOVD X15,DX
+ MOVD X11,CX
+ MOVD X1,R8
+ MOVD X6,R9
+ PSHUFL $0X39,X15,X15
+ PSHUFL $0X39,X11,X11
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X6,X6
+ XORL 160(SI),DX
+ XORL 164(SI),CX
+ XORL 168(SI),R8
+ XORL 172(SI),R9
+ MOVL DX,160(DI)
+ MOVL CX,164(DI)
+ MOVL R8,168(DI)
+ MOVL R9,172(DI)
+ MOVD X15,DX
+ MOVD X11,CX
+ MOVD X1,R8
+ MOVD X6,R9
+ XORL 224(SI),DX
+ XORL 228(SI),CX
+ XORL 232(SI),R8
+ XORL 236(SI),R9
+ MOVL DX,224(DI)
+ MOVL CX,228(DI)
+ MOVL R8,232(DI)
+ MOVL R9,236(DI)
+ PADDL 160(R12),X13
+ PADDL 208(R12),X9
+ PADDL 256(R12),X3
+ PADDL 96(R12),X2
+ MOVD X13,DX
+ MOVD X9,CX
+ MOVD X3,R8
+ MOVD X2,R9
+ PSHUFL $0X39,X13,X13
+ PSHUFL $0X39,X9,X9
+ PSHUFL $0X39,X3,X3
+ PSHUFL $0X39,X2,X2
+ XORL 48(SI),DX
+ XORL 52(SI),CX
+ XORL 56(SI),R8
+ XORL 60(SI),R9
+ MOVL DX,48(DI)
+ MOVL CX,52(DI)
+ MOVL R8,56(DI)
+ MOVL R9,60(DI)
+ MOVD X13,DX
+ MOVD X9,CX
+ MOVD X3,R8
+ MOVD X2,R9
+ PSHUFL $0X39,X13,X13
+ PSHUFL $0X39,X9,X9
+ PSHUFL $0X39,X3,X3
+ PSHUFL $0X39,X2,X2
+ XORL 112(SI),DX
+ XORL 116(SI),CX
+ XORL 120(SI),R8
+ XORL 124(SI),R9
+ MOVL DX,112(DI)
+ MOVL CX,116(DI)
+ MOVL R8,120(DI)
+ MOVL R9,124(DI)
+ MOVD X13,DX
+ MOVD X9,CX
+ MOVD X3,R8
+ MOVD X2,R9
+ PSHUFL $0X39,X13,X13
+ PSHUFL $0X39,X9,X9
+ PSHUFL $0X39,X3,X3
+ PSHUFL $0X39,X2,X2
+ XORL 176(SI),DX
+ XORL 180(SI),CX
+ XORL 184(SI),R8
+ XORL 188(SI),R9
+ MOVL DX,176(DI)
+ MOVL CX,180(DI)
+ MOVL R8,184(DI)
+ MOVL R9,188(DI)
+ MOVD X13,DX
+ MOVD X9,CX
+ MOVD X3,R8
+ MOVD X2,R9
+ XORL 240(SI),DX
+ XORL 244(SI),CX
+ XORL 248(SI),R8
+ XORL 252(SI),R9
+ MOVL DX,240(DI)
+ MOVL CX,244(DI)
+ MOVL R8,248(DI)
+ MOVL R9,252(DI)
+ MOVQ 352(R12),R9
+ SUBQ $256,R9
+ ADDQ $256,SI
+ ADDQ $256,DI
+ CMPQ R9,$256
+ JAE BYTESATLEAST256
+ CMPQ R9,$0
+ JBE DONE
+ BYTESBETWEEN1AND255:
+ CMPQ R9,$64
+ JAE NOCOPY
+ MOVQ DI,DX
+ LEAQ 360(R12),DI
+ MOVQ R9,CX
+ REP; MOVSB
+ LEAQ 360(R12),DI
+ LEAQ 360(R12),SI
+ NOCOPY:
+ MOVQ R9,352(R12)
+ MOVOA 48(R12),X0
+ MOVOA 0(R12),X1
+ MOVOA 16(R12),X2
+ MOVOA 32(R12),X3
+ MOVOA X1,X4
+ MOVQ $20,CX
+ MAINLOOP2:
+ PADDL X0,X4
+ MOVOA X0,X5
+ MOVOA X4,X6
+ PSLLL $7,X4
+ PSRLL $25,X6
+ PXOR X4,X3
+ PXOR X6,X3
+ PADDL X3,X5
+ MOVOA X3,X4
+ MOVOA X5,X6
+ PSLLL $9,X5
+ PSRLL $23,X6
+ PXOR X5,X2
+ PSHUFL $0X93,X3,X3
+ PXOR X6,X2
+ PADDL X2,X4
+ MOVOA X2,X5
+ MOVOA X4,X6
+ PSLLL $13,X4
+ PSRLL $19,X6
+ PXOR X4,X1
+ PSHUFL $0X4E,X2,X2
+ PXOR X6,X1
+ PADDL X1,X5
+ MOVOA X3,X4
+ MOVOA X5,X6
+ PSLLL $18,X5
+ PSRLL $14,X6
+ PXOR X5,X0
+ PSHUFL $0X39,X1,X1
+ PXOR X6,X0
+ PADDL X0,X4
+ MOVOA X0,X5
+ MOVOA X4,X6
+ PSLLL $7,X4
+ PSRLL $25,X6
+ PXOR X4,X1
+ PXOR X6,X1
+ PADDL X1,X5
+ MOVOA X1,X4
+ MOVOA X5,X6
+ PSLLL $9,X5
+ PSRLL $23,X6
+ PXOR X5,X2
+ PSHUFL $0X93,X1,X1
+ PXOR X6,X2
+ PADDL X2,X4
+ MOVOA X2,X5
+ MOVOA X4,X6
+ PSLLL $13,X4
+ PSRLL $19,X6
+ PXOR X4,X3
+ PSHUFL $0X4E,X2,X2
+ PXOR X6,X3
+ PADDL X3,X5
+ MOVOA X1,X4
+ MOVOA X5,X6
+ PSLLL $18,X5
+ PSRLL $14,X6
+ PXOR X5,X0
+ PSHUFL $0X39,X3,X3
+ PXOR X6,X0
+ PADDL X0,X4
+ MOVOA X0,X5
+ MOVOA X4,X6
+ PSLLL $7,X4
+ PSRLL $25,X6
+ PXOR X4,X3
+ PXOR X6,X3
+ PADDL X3,X5
+ MOVOA X3,X4
+ MOVOA X5,X6
+ PSLLL $9,X5
+ PSRLL $23,X6
+ PXOR X5,X2
+ PSHUFL $0X93,X3,X3
+ PXOR X6,X2
+ PADDL X2,X4
+ MOVOA X2,X5
+ MOVOA X4,X6
+ PSLLL $13,X4
+ PSRLL $19,X6
+ PXOR X4,X1
+ PSHUFL $0X4E,X2,X2
+ PXOR X6,X1
+ PADDL X1,X5
+ MOVOA X3,X4
+ MOVOA X5,X6
+ PSLLL $18,X5
+ PSRLL $14,X6
+ PXOR X5,X0
+ PSHUFL $0X39,X1,X1
+ PXOR X6,X0
+ PADDL X0,X4
+ MOVOA X0,X5
+ MOVOA X4,X6
+ PSLLL $7,X4
+ PSRLL $25,X6
+ PXOR X4,X1
+ PXOR X6,X1
+ PADDL X1,X5
+ MOVOA X1,X4
+ MOVOA X5,X6
+ PSLLL $9,X5
+ PSRLL $23,X6
+ PXOR X5,X2
+ PSHUFL $0X93,X1,X1
+ PXOR X6,X2
+ PADDL X2,X4
+ MOVOA X2,X5
+ MOVOA X4,X6
+ PSLLL $13,X4
+ PSRLL $19,X6
+ PXOR X4,X3
+ PSHUFL $0X4E,X2,X2
+ PXOR X6,X3
+ SUBQ $4,CX
+ PADDL X3,X5
+ MOVOA X1,X4
+ MOVOA X5,X6
+ PSLLL $18,X5
+ PXOR X7,X7
+ PSRLL $14,X6
+ PXOR X5,X0
+ PSHUFL $0X39,X3,X3
+ PXOR X6,X0
+ JA MAINLOOP2
+ PADDL 48(R12),X0
+ PADDL 0(R12),X1
+ PADDL 16(R12),X2
+ PADDL 32(R12),X3
+ MOVD X0,CX
+ MOVD X1,R8
+ MOVD X2,R9
+ MOVD X3,AX
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X2,X2
+ PSHUFL $0X39,X3,X3
+ XORL 0(SI),CX
+ XORL 48(SI),R8
+ XORL 32(SI),R9
+ XORL 16(SI),AX
+ MOVL CX,0(DI)
+ MOVL R8,48(DI)
+ MOVL R9,32(DI)
+ MOVL AX,16(DI)
+ MOVD X0,CX
+ MOVD X1,R8
+ MOVD X2,R9
+ MOVD X3,AX
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X2,X2
+ PSHUFL $0X39,X3,X3
+ XORL 20(SI),CX
+ XORL 4(SI),R8
+ XORL 52(SI),R9
+ XORL 36(SI),AX
+ MOVL CX,20(DI)
+ MOVL R8,4(DI)
+ MOVL R9,52(DI)
+ MOVL AX,36(DI)
+ MOVD X0,CX
+ MOVD X1,R8
+ MOVD X2,R9
+ MOVD X3,AX
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X2,X2
+ PSHUFL $0X39,X3,X3
+ XORL 40(SI),CX
+ XORL 24(SI),R8
+ XORL 8(SI),R9
+ XORL 56(SI),AX
+ MOVL CX,40(DI)
+ MOVL R8,24(DI)
+ MOVL R9,8(DI)
+ MOVL AX,56(DI)
+ MOVD X0,CX
+ MOVD X1,R8
+ MOVD X2,R9
+ MOVD X3,AX
+ XORL 60(SI),CX
+ XORL 44(SI),R8
+ XORL 28(SI),R9
+ XORL 12(SI),AX
+ MOVL CX,60(DI)
+ MOVL R8,44(DI)
+ MOVL R9,28(DI)
+ MOVL AX,12(DI)
+ MOVQ 352(R12),R9
+ MOVL 16(R12),CX
+ MOVL 36 (R12),R8
+ ADDQ $1,CX
+ SHLQ $32,R8
+ ADDQ R8,CX
+ MOVQ CX,R8
+ SHRQ $32,R8
+ MOVL CX,16(R12)
+ MOVL R8, 36 (R12)
+ CMPQ R9,$64
+ JA BYTESATLEAST65
+ JAE BYTESATLEAST64
+ MOVQ DI,SI
+ MOVQ DX,DI
+ MOVQ R9,CX
+ REP; MOVSB
+ BYTESATLEAST64:
+ DONE:
+ RET
+ BYTESATLEAST65:
+ SUBQ $64,R9
+ ADDQ $64,DI
+ ADDQ $64,SI
+ JMP BYTESBETWEEN1AND255
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go
new file mode 100644
index 000000000..4392cc1ac
--- /dev/null
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go
@@ -0,0 +1,15 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || purego || !gc
+// +build !amd64 purego !gc
+
+package salsa
+
+// XORKeyStream crypts bytes from in to out using the given key and counters.
+// In and out must overlap entirely or not at all. Counter
+// contains the raw salsa20 counter bytes (both nonce and block counter).
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
+ genericXORKeyStream(out, in, counter, key)
+}
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
new file mode 100644
index 000000000..68169c6d6
--- /dev/null
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
@@ -0,0 +1,231 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package salsa
+
+const rounds = 20
+
+// core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
+// and 16-byte constant c, and puts the result into 64-byte array out.
+func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
+ j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
+ j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
+ j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
+ j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
+ j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
+ j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
+ j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
+ j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
+ j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
+ j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
+ j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
+ j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
+ j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
+ j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
+ j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
+
+ x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
+ x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
+
+ for i := 0; i < rounds; i += 2 {
+ u := x0 + x12
+ x4 ^= u<<7 | u>>(32-7)
+ u = x4 + x0
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x4
+ x12 ^= u<<13 | u>>(32-13)
+ u = x12 + x8
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x1
+ x9 ^= u<<7 | u>>(32-7)
+ u = x9 + x5
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x9
+ x1 ^= u<<13 | u>>(32-13)
+ u = x1 + x13
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x6
+ x14 ^= u<<7 | u>>(32-7)
+ u = x14 + x10
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x14
+ x6 ^= u<<13 | u>>(32-13)
+ u = x6 + x2
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x11
+ x3 ^= u<<7 | u>>(32-7)
+ u = x3 + x15
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x3
+ x11 ^= u<<13 | u>>(32-13)
+ u = x11 + x7
+ x15 ^= u<<18 | u>>(32-18)
+
+ u = x0 + x3
+ x1 ^= u<<7 | u>>(32-7)
+ u = x1 + x0
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x1
+ x3 ^= u<<13 | u>>(32-13)
+ u = x3 + x2
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x4
+ x6 ^= u<<7 | u>>(32-7)
+ u = x6 + x5
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x6
+ x4 ^= u<<13 | u>>(32-13)
+ u = x4 + x7
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x9
+ x11 ^= u<<7 | u>>(32-7)
+ u = x11 + x10
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x11
+ x9 ^= u<<13 | u>>(32-13)
+ u = x9 + x8
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x14
+ x12 ^= u<<7 | u>>(32-7)
+ u = x12 + x15
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x12
+ x14 ^= u<<13 | u>>(32-13)
+ u = x14 + x13
+ x15 ^= u<<18 | u>>(32-18)
+ }
+ x0 += j0
+ x1 += j1
+ x2 += j2
+ x3 += j3
+ x4 += j4
+ x5 += j5
+ x6 += j6
+ x7 += j7
+ x8 += j8
+ x9 += j9
+ x10 += j10
+ x11 += j11
+ x12 += j12
+ x13 += j13
+ x14 += j14
+ x15 += j15
+
+ out[0] = byte(x0)
+ out[1] = byte(x0 >> 8)
+ out[2] = byte(x0 >> 16)
+ out[3] = byte(x0 >> 24)
+
+ out[4] = byte(x1)
+ out[5] = byte(x1 >> 8)
+ out[6] = byte(x1 >> 16)
+ out[7] = byte(x1 >> 24)
+
+ out[8] = byte(x2)
+ out[9] = byte(x2 >> 8)
+ out[10] = byte(x2 >> 16)
+ out[11] = byte(x2 >> 24)
+
+ out[12] = byte(x3)
+ out[13] = byte(x3 >> 8)
+ out[14] = byte(x3 >> 16)
+ out[15] = byte(x3 >> 24)
+
+ out[16] = byte(x4)
+ out[17] = byte(x4 >> 8)
+ out[18] = byte(x4 >> 16)
+ out[19] = byte(x4 >> 24)
+
+ out[20] = byte(x5)
+ out[21] = byte(x5 >> 8)
+ out[22] = byte(x5 >> 16)
+ out[23] = byte(x5 >> 24)
+
+ out[24] = byte(x6)
+ out[25] = byte(x6 >> 8)
+ out[26] = byte(x6 >> 16)
+ out[27] = byte(x6 >> 24)
+
+ out[28] = byte(x7)
+ out[29] = byte(x7 >> 8)
+ out[30] = byte(x7 >> 16)
+ out[31] = byte(x7 >> 24)
+
+ out[32] = byte(x8)
+ out[33] = byte(x8 >> 8)
+ out[34] = byte(x8 >> 16)
+ out[35] = byte(x8 >> 24)
+
+ out[36] = byte(x9)
+ out[37] = byte(x9 >> 8)
+ out[38] = byte(x9 >> 16)
+ out[39] = byte(x9 >> 24)
+
+ out[40] = byte(x10)
+ out[41] = byte(x10 >> 8)
+ out[42] = byte(x10 >> 16)
+ out[43] = byte(x10 >> 24)
+
+ out[44] = byte(x11)
+ out[45] = byte(x11 >> 8)
+ out[46] = byte(x11 >> 16)
+ out[47] = byte(x11 >> 24)
+
+ out[48] = byte(x12)
+ out[49] = byte(x12 >> 8)
+ out[50] = byte(x12 >> 16)
+ out[51] = byte(x12 >> 24)
+
+ out[52] = byte(x13)
+ out[53] = byte(x13 >> 8)
+ out[54] = byte(x13 >> 16)
+ out[55] = byte(x13 >> 24)
+
+ out[56] = byte(x14)
+ out[57] = byte(x14 >> 8)
+ out[58] = byte(x14 >> 16)
+ out[59] = byte(x14 >> 24)
+
+ out[60] = byte(x15)
+ out[61] = byte(x15 >> 8)
+ out[62] = byte(x15 >> 16)
+ out[63] = byte(x15 >> 24)
+}
+
+// genericXORKeyStream is the generic implementation of XORKeyStream to be used
+// when no assembly implementation is available.
+func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
+ var block [64]byte
+ var counterCopy [16]byte
+ copy(counterCopy[:], counter[:])
+
+ for len(in) >= 64 {
+ core(&block, &counterCopy, key, &Sigma)
+ for i, x := range block {
+ out[i] = in[i] ^ x
+ }
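+		// Advance the 64-bit block counter stored little-endian in counterCopy[8:16].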
+ u := uint32(1)
+ for i := 8; i < 16; i++ {
+ u += uint32(counterCopy[i])
+ counterCopy[i] = byte(u)
+ u >>= 8
+ }
+ in = in[64:]
+ out = out[64:]
+ }
+
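+	// XOR the final partial block, if any, against one more keystream block.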
+ if len(in) > 0 {
+ core(&block, &counterCopy, key, &Sigma)
+ for i, v := range in {
+ out[i] = v ^ block[i]
+ }
+ }
+}
diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go
new file mode 100644
index 000000000..c971a99fa
--- /dev/null
+++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go
@@ -0,0 +1,212 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scrypt implements the scrypt key derivation function as defined in
+// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard
+// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf).
+package scrypt // import "golang.org/x/crypto/scrypt"
+
+import (
+ "crypto/sha256"
+ "encoding/binary"
+ "errors"
+ "math/bits"
+
+ "golang.org/x/crypto/pbkdf2"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// blockCopy copies n numbers from src into dst.
+func blockCopy(dst, src []uint32, n int) {
+ copy(dst, src[:n])
+}
+
+// blockXOR XORs numbers from dst with n numbers from src.
+func blockXOR(dst, src []uint32, n int) {
+ for i, v := range src[:n] {
+ dst[i] ^= v
+ }
+}
+
+// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in,
+// and puts the result into both tmp and out.
+func salsaXOR(tmp *[16]uint32, in, out []uint32) {
+ w0 := tmp[0] ^ in[0]
+ w1 := tmp[1] ^ in[1]
+ w2 := tmp[2] ^ in[2]
+ w3 := tmp[3] ^ in[3]
+ w4 := tmp[4] ^ in[4]
+ w5 := tmp[5] ^ in[5]
+ w6 := tmp[6] ^ in[6]
+ w7 := tmp[7] ^ in[7]
+ w8 := tmp[8] ^ in[8]
+ w9 := tmp[9] ^ in[9]
+ w10 := tmp[10] ^ in[10]
+ w11 := tmp[11] ^ in[11]
+ w12 := tmp[12] ^ in[12]
+ w13 := tmp[13] ^ in[13]
+ w14 := tmp[14] ^ in[14]
+ w15 := tmp[15] ^ in[15]
+
+ x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8
+ x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15
+
+ for i := 0; i < 8; i += 2 {
+ x4 ^= bits.RotateLeft32(x0+x12, 7)
+ x8 ^= bits.RotateLeft32(x4+x0, 9)
+ x12 ^= bits.RotateLeft32(x8+x4, 13)
+ x0 ^= bits.RotateLeft32(x12+x8, 18)
+
+ x9 ^= bits.RotateLeft32(x5+x1, 7)
+ x13 ^= bits.RotateLeft32(x9+x5, 9)
+ x1 ^= bits.RotateLeft32(x13+x9, 13)
+ x5 ^= bits.RotateLeft32(x1+x13, 18)
+
+ x14 ^= bits.RotateLeft32(x10+x6, 7)
+ x2 ^= bits.RotateLeft32(x14+x10, 9)
+ x6 ^= bits.RotateLeft32(x2+x14, 13)
+ x10 ^= bits.RotateLeft32(x6+x2, 18)
+
+ x3 ^= bits.RotateLeft32(x15+x11, 7)
+ x7 ^= bits.RotateLeft32(x3+x15, 9)
+ x11 ^= bits.RotateLeft32(x7+x3, 13)
+ x15 ^= bits.RotateLeft32(x11+x7, 18)
+
+ x1 ^= bits.RotateLeft32(x0+x3, 7)
+ x2 ^= bits.RotateLeft32(x1+x0, 9)
+ x3 ^= bits.RotateLeft32(x2+x1, 13)
+ x0 ^= bits.RotateLeft32(x3+x2, 18)
+
+ x6 ^= bits.RotateLeft32(x5+x4, 7)
+ x7 ^= bits.RotateLeft32(x6+x5, 9)
+ x4 ^= bits.RotateLeft32(x7+x6, 13)
+ x5 ^= bits.RotateLeft32(x4+x7, 18)
+
+ x11 ^= bits.RotateLeft32(x10+x9, 7)
+ x8 ^= bits.RotateLeft32(x11+x10, 9)
+ x9 ^= bits.RotateLeft32(x8+x11, 13)
+ x10 ^= bits.RotateLeft32(x9+x8, 18)
+
+ x12 ^= bits.RotateLeft32(x15+x14, 7)
+ x13 ^= bits.RotateLeft32(x12+x15, 9)
+ x14 ^= bits.RotateLeft32(x13+x12, 13)
+ x15 ^= bits.RotateLeft32(x14+x13, 18)
+ }
+ x0 += w0
+ x1 += w1
+ x2 += w2
+ x3 += w3
+ x4 += w4
+ x5 += w5
+ x6 += w6
+ x7 += w7
+ x8 += w8
+ x9 += w9
+ x10 += w10
+ x11 += w11
+ x12 += w12
+ x13 += w13
+ x14 += w14
+ x15 += w15
+
+ out[0], tmp[0] = x0, x0
+ out[1], tmp[1] = x1, x1
+ out[2], tmp[2] = x2, x2
+ out[3], tmp[3] = x3, x3
+ out[4], tmp[4] = x4, x4
+ out[5], tmp[5] = x5, x5
+ out[6], tmp[6] = x6, x6
+ out[7], tmp[7] = x7, x7
+ out[8], tmp[8] = x8, x8
+ out[9], tmp[9] = x9, x9
+ out[10], tmp[10] = x10, x10
+ out[11], tmp[11] = x11, x11
+ out[12], tmp[12] = x12, x12
+ out[13], tmp[13] = x13, x13
+ out[14], tmp[14] = x14, x14
+ out[15], tmp[15] = x15, x15
+}
+
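+// blockMix implements scrypt's BlockMix: it Salsa20/8-mixes the 2*r 64-byte
+// blocks of in into out, interleaving even-indexed results into the first
+// half of out and odd-indexed results into the second half, with tmp as the
+// running X value.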
+func blockMix(tmp *[16]uint32, in, out []uint32, r int) {
+ blockCopy(tmp[:], in[(2*r-1)*16:], 16)
+ for i := 0; i < 2*r; i += 2 {
+ salsaXOR(tmp, in[i*16:], out[i*8:])
+ salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:])
+ }
+}
+
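+// integer implements scrypt's Integerify: the low 64 bits, read little-endian,
+// of the last 64-byte block of b.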
+func integer(b []uint32, r int) uint64 {
+ j := (2*r - 1) * 16
+ return uint64(b[j]) | uint64(b[j+1])<<32
+}
+
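+// smix implements scrypt's ROMix on one 128*r-byte block b, using v
+// (32*N*r words) and xy (64*r words) as scratch space.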
+func smix(b []byte, r, N int, v, xy []uint32) {
+ var tmp [16]uint32
+ R := 32 * r
+ x := xy
+ y := xy[R:]
+
+ j := 0
+ for i := 0; i < R; i++ {
+ x[i] = binary.LittleEndian.Uint32(b[j:])
+ j += 4
+ }
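+	// First loop: fill v with N successive block states, mixing x and y as we go.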
+ for i := 0; i < N; i += 2 {
+ blockCopy(v[i*R:], x, R)
+ blockMix(&tmp, x, y, r)
+
+ blockCopy(v[(i+1)*R:], y, R)
+ blockMix(&tmp, y, x, r)
+ }
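+	// Second loop: combine x with blocks of v chosen data-dependently by
+	// integer(); N is a power of two, so &(N-1) reduces mod N.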
+ for i := 0; i < N; i += 2 {
+ j := int(integer(x, r) & uint64(N-1))
+ blockXOR(x, v[j*R:], R)
+ blockMix(&tmp, x, y, r)
+
+ j = int(integer(y, r) & uint64(N-1))
+ blockXOR(y, v[j*R:], R)
+ blockMix(&tmp, y, x, r)
+ }
+ j = 0
+ for _, v := range x[:R] {
+ binary.LittleEndian.PutUint32(b[j:], v)
+ j += 4
+ }
+}
+
+// Key derives a key from the password, salt, and cost parameters, returning
+// a byte slice of length keyLen that can be used as a cryptographic key.
+//
+// N is a CPU/memory cost parameter, which must be a power of two greater than 1.
+// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the
+// limits, the function returns a nil byte slice and an error.
+//
+// For example, you can get a derived key for AES-256 (which needs a
+// 32-byte key) by doing:
+//
+// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32)
+//
+// The recommended parameters for interactive logins as of 2017 are N=32768, r=8
+// and p=1. The parameters N, r, and p should be increased as memory latency and
+// CPU parallelism increase; consider setting N to the highest power of 2 you
+// can derive within 100 milliseconds. Remember to get a good random salt.
+func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) {
+ if N <= 1 || N&(N-1) != 0 {
+ return nil, errors.New("scrypt: N must be > 1 and a power of 2")
+ }
+ if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r {
+ return nil, errors.New("scrypt: parameters are too large")
+ }
+
+ xy := make([]uint32, 64*r)
+ v := make([]uint32, 32*N*r)
+ b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New)
+
+ for i := 0; i < p; i++ {
+ smix(b[i*128*r:], r, N, v, xy)
+ }
+
+ return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil
+}
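
A self-contained sketch of deriving a key with the recommended interactive parameters (the password literal is a placeholder):

    package main

    import (
        "crypto/rand"
        "fmt"
        "log"

        "golang.org/x/crypto/scrypt"
    )

    func main() {
        salt := make([]byte, 16) // fresh random salt, stored alongside the derived key
        if _, err := rand.Read(salt); err != nil {
            log.Fatal(err)
        }
        dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%x\n", dk) // 32-byte key, suitable e.g. for AES-256
    }
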
diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go
new file mode 100644
index 000000000..decd8cf9b
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/doc.go
@@ -0,0 +1,62 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sha3 implements the SHA-3 fixed-output-length hash functions and
+// the SHAKE variable-output-length hash functions defined by FIPS-202.
+//
+// Both types of hash function use the "sponge" construction and the Keccak
+// permutation. For a detailed specification see http://keccak.noekeon.org/
+//
+// # Guidance
+//
+// If you aren't sure what function you need, use SHAKE256 with at least 64
+// bytes of output. The SHAKE instances are faster than the SHA3 instances;
+// the latter have to allocate memory to conform to the hash.Hash interface.
+//
+// If you need a secret-key MAC (message authentication code), prepend the
+// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
+// output.
+//
+// # Security strengths
+//
+// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
+// strength against preimage attacks of x bits. Since they only produce "x"
+// bits of output, their collision-resistance is only "x/2" bits.
+//
+// The SHAKE-256 and -128 functions have a generic security strength of 256 and
+// 128 bits against all attacks, provided that at least 2x bits of their output
+// is used. Requesting more than 64 or 32 bytes of output, respectively, does
+// not increase the collision-resistance of the SHAKE functions.
+//
+// # The sponge construction
+//
+// A sponge builds a pseudo-random function from a public pseudo-random
+// permutation, by applying the permutation to a state of "rate + capacity"
+// bytes, but hiding "capacity" of the bytes.
+//
+// A sponge starts out with a zero state. To hash an input using a sponge, up
+// to "rate" bytes of the input are XORed into the sponge's state. The sponge
+// is then "full" and the permutation is applied to "empty" it. This process is
+// repeated until all the input has been "absorbed". The input is then padded.
+// The digest is "squeezed" from the sponge in the same way, except that output
+// is copied out instead of input being XORed in.
+//
+// A sponge is parameterized by its generic security strength, which is equal
+// to half its capacity; capacity + rate is equal to the permutation's width.
+// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
+// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
+//
+// # Recommendations
+//
+// The SHAKE functions are recommended for most new uses. They can produce
+// output of arbitrary length. SHAKE256, with an output length of at least
+// 64 bytes, provides 256-bit security against all attacks. The Keccak team
+// recommends it for most applications upgrading from SHA2-512. (NIST chose a
+// much stronger, but much slower, sponge instance for SHA3-512.)
+//
+// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
+// They produce output of the same length, with the same security strengths
+// against all attacks. This means, in particular, that SHA3-256 only has
+// 128-bit collision resistance, because its output length is 32 bytes.
+package sha3 // import "golang.org/x/crypto/sha3"
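
Following the guidance above, a sketch of SHAKE256 with 64 bytes of output (ShakeSum256 is declared elsewhere in this package, outside this hunk):

    out := make([]byte, 64) // at least 64 bytes, per the guidance above
    sha3.ShakeSum256(out, []byte("input data"))
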
diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go
new file mode 100644
index 000000000..0d8043fd2
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/hashes.go
@@ -0,0 +1,97 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// This file provides functions for creating instances of the SHA-3
+// and SHAKE hash functions, as well as utility functions for hashing
+// bytes.
+
+import (
+ "hash"
+)
+
+// New224 creates a new SHA3-224 hash.
+// Its generic security strength is 224 bits against preimage attacks,
+// and 112 bits against collision attacks.
+func New224() hash.Hash {
+ if h := new224Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 144, outputLen: 28, dsbyte: 0x06}
+}
+
+// New256 creates a new SHA3-256 hash.
+// Its generic security strength is 256 bits against preimage attacks,
+// and 128 bits against collision attacks.
+func New256() hash.Hash {
+ if h := new256Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 136, outputLen: 32, dsbyte: 0x06}
+}
+
+// New384 creates a new SHA3-384 hash.
+// Its generic security strength is 384 bits against preimage attacks,
+// and 192 bits against collision attacks.
+func New384() hash.Hash {
+ if h := new384Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 104, outputLen: 48, dsbyte: 0x06}
+}
+
+// New512 creates a new SHA3-512 hash.
+// Its generic security strength is 512 bits against preimage attacks,
+// and 256 bits against collision attacks.
+func New512() hash.Hash {
+ if h := new512Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 72, outputLen: 64, dsbyte: 0x06}
+}
+
+// NewLegacyKeccak256 creates a new Keccak-256 hash.
+//
+// Only use this function if you require compatibility with an existing cryptosystem
+// that uses non-standard padding. All other users should use New256 instead.
+func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
+
+// NewLegacyKeccak512 creates a new Keccak-512 hash.
+//
+// Only use this function if you require compatibility with an existing cryptosystem
+// that uses non-standard padding. All other users should use New512 instead.
+func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
+
+// Sum224 returns the SHA3-224 digest of the data.
+func Sum224(data []byte) (digest [28]byte) {
+ h := New224()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
+
+// Sum256 returns the SHA3-256 digest of the data.
+func Sum256(data []byte) (digest [32]byte) {
+ h := New256()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
+
+// Sum384 returns the SHA3-384 digest of the data.
+func Sum384(data []byte) (digest [48]byte) {
+ h := New384()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
+
+// Sum512 returns the SHA3-512 digest of the data.
+func Sum512(data []byte) (digest [64]byte) {
+ h := New512()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
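
Example use of the one-shot helpers above:

    digest := sha3.Sum256([]byte("hello world")) // digest is a [32]byte
    fmt.Printf("%x\n", digest[:])
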
diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go
new file mode 100644
index 000000000..c74fc20fc
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go
@@ -0,0 +1,28 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !gc || purego || !s390x
+// +build !gc purego !s390x
+
+package sha3
+
+import (
+ "hash"
+)
+
+// new224Asm returns an assembly implementation of SHA3-224 if available,
+// otherwise it returns nil.
+func new224Asm() hash.Hash { return nil }
+
+// new256Asm returns an assembly implementation of SHA3-256 if available,
+// otherwise it returns nil.
+func new256Asm() hash.Hash { return nil }
+
+// new384Asm returns an assembly implementation of SHA3-384 if available,
+// otherwise it returns nil.
+func new384Asm() hash.Hash { return nil }
+
+// new512Asm returns an assembly implementation of SHA3-512 if available,
+// otherwise it returns nil.
+func new512Asm() hash.Hash { return nil }
diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go
new file mode 100644
index 000000000..0f4ae8bac
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/keccakf.go
@@ -0,0 +1,413 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || purego || !gc
+// +build !amd64 purego !gc
+
+package sha3
+
+// rc stores the round constants for use in the ι step.
+var rc = [24]uint64{
+ 0x0000000000000001,
+ 0x0000000000008082,
+ 0x800000000000808A,
+ 0x8000000080008000,
+ 0x000000000000808B,
+ 0x0000000080000001,
+ 0x8000000080008081,
+ 0x8000000000008009,
+ 0x000000000000008A,
+ 0x0000000000000088,
+ 0x0000000080008009,
+ 0x000000008000000A,
+ 0x000000008000808B,
+ 0x800000000000008B,
+ 0x8000000000008089,
+ 0x8000000000008003,
+ 0x8000000000008002,
+ 0x8000000000000080,
+ 0x000000000000800A,
+ 0x800000008000000A,
+ 0x8000000080008081,
+ 0x8000000000008080,
+ 0x0000000080000001,
+ 0x8000000080008008,
+}
+
+// keccakF1600 applies the Keccak permutation to a 1600-bit-wide state
+// represented as an array of 25 uint64s.
+func keccakF1600(a *[25]uint64) {
+ // Implementation translated from Keccak-inplace.c
+ // in the keccak reference code.
+ var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
+
+ for i := 0; i < 24; i += 4 {
+ // Combines the 5 steps in each round into 2 steps.
+ // Unrolls 4 rounds per loop and spreads some steps across rounds.
+
+ // Round 1
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[6] ^ d1
+ bc1 = t<<44 | t>>(64-44)
+ t = a[12] ^ d2
+ bc2 = t<<43 | t>>(64-43)
+ t = a[18] ^ d3
+ bc3 = t<<21 | t>>(64-21)
+ t = a[24] ^ d4
+ bc4 = t<<14 | t>>(64-14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc2 = t<<3 | t>>(64-3)
+ t = a[16] ^ d1
+ bc3 = t<<45 | t>>(64-45)
+ t = a[22] ^ d2
+ bc4 = t<<61 | t>>(64-61)
+ t = a[3] ^ d3
+ bc0 = t<<28 | t>>(64-28)
+ t = a[9] ^ d4
+ bc1 = t<<20 | t>>(64-20)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc4 = t<<18 | t>>(64-18)
+ t = a[1] ^ d1
+ bc0 = t<<1 | t>>(64-1)
+ t = a[7] ^ d2
+ bc1 = t<<6 | t>>(64-6)
+ t = a[13] ^ d3
+ bc2 = t<<25 | t>>(64-25)
+ t = a[19] ^ d4
+ bc3 = t<<8 | t>>(64-8)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc1 = t<<36 | t>>(64-36)
+ t = a[11] ^ d1
+ bc2 = t<<10 | t>>(64-10)
+ t = a[17] ^ d2
+ bc3 = t<<15 | t>>(64-15)
+ t = a[23] ^ d3
+ bc4 = t<<56 | t>>(64-56)
+ t = a[4] ^ d4
+ bc0 = t<<27 | t>>(64-27)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc3 = t<<41 | t>>(64-41)
+ t = a[21] ^ d1
+ bc4 = t<<2 | t>>(64-2)
+ t = a[2] ^ d2
+ bc0 = t<<62 | t>>(64-62)
+ t = a[8] ^ d3
+ bc1 = t<<55 | t>>(64-55)
+ t = a[14] ^ d4
+ bc2 = t<<39 | t>>(64-39)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ // Round 2
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[16] ^ d1
+ bc1 = t<<44 | t>>(64-44)
+ t = a[7] ^ d2
+ bc2 = t<<43 | t>>(64-43)
+ t = a[23] ^ d3
+ bc3 = t<<21 | t>>(64-21)
+ t = a[14] ^ d4
+ bc4 = t<<14 | t>>(64-14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc2 = t<<3 | t>>(64-3)
+ t = a[11] ^ d1
+ bc3 = t<<45 | t>>(64-45)
+ t = a[2] ^ d2
+ bc4 = t<<61 | t>>(64-61)
+ t = a[18] ^ d3
+ bc0 = t<<28 | t>>(64-28)
+ t = a[9] ^ d4
+ bc1 = t<<20 | t>>(64-20)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc4 = t<<18 | t>>(64-18)
+ t = a[6] ^ d1
+ bc0 = t<<1 | t>>(64-1)
+ t = a[22] ^ d2
+ bc1 = t<<6 | t>>(64-6)
+ t = a[13] ^ d3
+ bc2 = t<<25 | t>>(64-25)
+ t = a[4] ^ d4
+ bc3 = t<<8 | t>>(64-8)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc1 = t<<36 | t>>(64-36)
+ t = a[1] ^ d1
+ bc2 = t<<10 | t>>(64-10)
+ t = a[17] ^ d2
+ bc3 = t<<15 | t>>(64-15)
+ t = a[8] ^ d3
+ bc4 = t<<56 | t>>(64-56)
+ t = a[24] ^ d4
+ bc0 = t<<27 | t>>(64-27)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc3 = t<<41 | t>>(64-41)
+ t = a[21] ^ d1
+ bc4 = t<<2 | t>>(64-2)
+ t = a[12] ^ d2
+ bc0 = t<<62 | t>>(64-62)
+ t = a[3] ^ d3
+ bc1 = t<<55 | t>>(64-55)
+ t = a[19] ^ d4
+ bc2 = t<<39 | t>>(64-39)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ // Round 3
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[11] ^ d1
+ bc1 = t<<44 | t>>(64-44)
+ t = a[22] ^ d2
+ bc2 = t<<43 | t>>(64-43)
+ t = a[8] ^ d3
+ bc3 = t<<21 | t>>(64-21)
+ t = a[19] ^ d4
+ bc4 = t<<14 | t>>(64-14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc2 = t<<3 | t>>(64-3)
+ t = a[1] ^ d1
+ bc3 = t<<45 | t>>(64-45)
+ t = a[12] ^ d2
+ bc4 = t<<61 | t>>(64-61)
+ t = a[23] ^ d3
+ bc0 = t<<28 | t>>(64-28)
+ t = a[9] ^ d4
+ bc1 = t<<20 | t>>(64-20)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc4 = t<<18 | t>>(64-18)
+ t = a[16] ^ d1
+ bc0 = t<<1 | t>>(64-1)
+ t = a[2] ^ d2
+ bc1 = t<<6 | t>>(64-6)
+ t = a[13] ^ d3
+ bc2 = t<<25 | t>>(64-25)
+ t = a[24] ^ d4
+ bc3 = t<<8 | t>>(64-8)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc1 = t<<36 | t>>(64-36)
+ t = a[6] ^ d1
+ bc2 = t<<10 | t>>(64-10)
+ t = a[17] ^ d2
+ bc3 = t<<15 | t>>(64-15)
+ t = a[3] ^ d3
+ bc4 = t<<56 | t>>(64-56)
+ t = a[14] ^ d4
+ bc0 = t<<27 | t>>(64-27)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc3 = t<<41 | t>>(64-41)
+ t = a[21] ^ d1
+ bc4 = t<<2 | t>>(64-2)
+ t = a[7] ^ d2
+ bc0 = t<<62 | t>>(64-62)
+ t = a[18] ^ d3
+ bc1 = t<<55 | t>>(64-55)
+ t = a[4] ^ d4
+ bc2 = t<<39 | t>>(64-39)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ // Round 4
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[1] ^ d1
+ bc1 = t<<44 | t>>(64-44)
+ t = a[2] ^ d2
+ bc2 = t<<43 | t>>(64-43)
+ t = a[3] ^ d3
+ bc3 = t<<21 | t>>(64-21)
+ t = a[4] ^ d4
+ bc4 = t<<14 | t>>(64-14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc2 = t<<3 | t>>(64-3)
+ t = a[6] ^ d1
+ bc3 = t<<45 | t>>(64-45)
+ t = a[7] ^ d2
+ bc4 = t<<61 | t>>(64-61)
+ t = a[8] ^ d3
+ bc0 = t<<28 | t>>(64-28)
+ t = a[9] ^ d4
+ bc1 = t<<20 | t>>(64-20)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc4 = t<<18 | t>>(64-18)
+ t = a[11] ^ d1
+ bc0 = t<<1 | t>>(64-1)
+ t = a[12] ^ d2
+ bc1 = t<<6 | t>>(64-6)
+ t = a[13] ^ d3
+ bc2 = t<<25 | t>>(64-25)
+ t = a[14] ^ d4
+ bc3 = t<<8 | t>>(64-8)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc1 = t<<36 | t>>(64-36)
+ t = a[16] ^ d1
+ bc2 = t<<10 | t>>(64-10)
+ t = a[17] ^ d2
+ bc3 = t<<15 | t>>(64-15)
+ t = a[18] ^ d3
+ bc4 = t<<56 | t>>(64-56)
+ t = a[19] ^ d4
+ bc0 = t<<27 | t>>(64-27)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc3 = t<<41 | t>>(64-41)
+ t = a[21] ^ d1
+ bc4 = t<<2 | t>>(64-2)
+ t = a[22] ^ d2
+ bc0 = t<<62 | t>>(64-62)
+ t = a[23] ^ d3
+ bc1 = t<<55 | t>>(64-55)
+ t = a[24] ^ d4
+ bc2 = t<<39 | t>>(64-39)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+ }
+}
diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
new file mode 100644
index 000000000..248a38241
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 && !purego && gc
+// +build amd64,!purego,gc
+
+package sha3
+
+// This function is implemented in keccakf_amd64.s.
+
+//go:noescape
+
+func keccakF1600(a *[25]uint64)
diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
new file mode 100644
index 000000000..4cfa54383
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
@@ -0,0 +1,391 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 && !purego && gc
+// +build amd64,!purego,gc
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources at https://github.com/gvanas/KeccakCodePackage
+
+// Offsets in state
+#define _ba (0*8)
+#define _be (1*8)
+#define _bi (2*8)
+#define _bo (3*8)
+#define _bu (4*8)
+#define _ga (5*8)
+#define _ge (6*8)
+#define _gi (7*8)
+#define _go (8*8)
+#define _gu (9*8)
+#define _ka (10*8)
+#define _ke (11*8)
+#define _ki (12*8)
+#define _ko (13*8)
+#define _ku (14*8)
+#define _ma (15*8)
+#define _me (16*8)
+#define _mi (17*8)
+#define _mo (18*8)
+#define _mu (19*8)
+#define _sa (20*8)
+#define _se (21*8)
+#define _si (22*8)
+#define _so (23*8)
+#define _su (24*8)
+
+// Temporary registers
+#define rT1 AX
+
+// Round vars
+#define rpState DI
+#define rpStack SP
+
+#define rDa BX
+#define rDe CX
+#define rDi DX
+#define rDo R8
+#define rDu R9
+
+#define rBa R10
+#define rBe R11
+#define rBi R12
+#define rBo R13
+#define rBu R14
+
+#define rCa SI
+#define rCe BP
+#define rCi rBi
+#define rCo rBo
+#define rCu R15
+
+#define MOVQ_RBI_RCE MOVQ rBi, rCe
+#define XORQ_RT1_RCA XORQ rT1, rCa
+#define XORQ_RT1_RCE XORQ rT1, rCe
+#define XORQ_RBA_RCU XORQ rBa, rCu
+#define XORQ_RBE_RCU XORQ rBe, rCu
+#define XORQ_RDU_RCU XORQ rDu, rCu
+#define XORQ_RDA_RCA XORQ rDa, rCa
+#define XORQ_RDE_RCE XORQ rDe, rCe
+
+#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \
+ /* Prepare round */ \
+ MOVQ rCe, rDa; \
+ ROLQ $1, rDa; \
+ \
+ MOVQ _bi(iState), rCi; \
+ XORQ _gi(iState), rDi; \
+ XORQ rCu, rDa; \
+ XORQ _ki(iState), rCi; \
+ XORQ _mi(iState), rDi; \
+ XORQ rDi, rCi; \
+ \
+ MOVQ rCi, rDe; \
+ ROLQ $1, rDe; \
+ \
+ MOVQ _bo(iState), rCo; \
+ XORQ _go(iState), rDo; \
+ XORQ rCa, rDe; \
+ XORQ _ko(iState), rCo; \
+ XORQ _mo(iState), rDo; \
+ XORQ rDo, rCo; \
+ \
+ MOVQ rCo, rDi; \
+ ROLQ $1, rDi; \
+ \
+ MOVQ rCu, rDo; \
+ XORQ rCe, rDi; \
+ ROLQ $1, rDo; \
+ \
+ MOVQ rCa, rDu; \
+ XORQ rCi, rDo; \
+ ROLQ $1, rDu; \
+ \
+ /* Result b */ \
+ MOVQ _ba(iState), rBa; \
+ MOVQ _ge(iState), rBe; \
+ XORQ rCo, rDu; \
+ MOVQ _ki(iState), rBi; \
+ MOVQ _mo(iState), rBo; \
+ MOVQ _su(iState), rBu; \
+ XORQ rDe, rBe; \
+ ROLQ $44, rBe; \
+ XORQ rDi, rBi; \
+ XORQ rDa, rBa; \
+ ROLQ $43, rBi; \
+ \
+ MOVQ rBe, rCa; \
+ MOVQ rc, rT1; \
+ ORQ rBi, rCa; \
+ XORQ rBa, rT1; \
+ XORQ rT1, rCa; \
+ MOVQ rCa, _ba(oState); \
+ \
+ XORQ rDu, rBu; \
+ ROLQ $14, rBu; \
+ MOVQ rBa, rCu; \
+ ANDQ rBe, rCu; \
+ XORQ rBu, rCu; \
+ MOVQ rCu, _bu(oState); \
+ \
+ XORQ rDo, rBo; \
+ ROLQ $21, rBo; \
+ MOVQ rBo, rT1; \
+ ANDQ rBu, rT1; \
+ XORQ rBi, rT1; \
+ MOVQ rT1, _bi(oState); \
+ \
+ NOTQ rBi; \
+ ORQ rBa, rBu; \
+ ORQ rBo, rBi; \
+ XORQ rBo, rBu; \
+ XORQ rBe, rBi; \
+ MOVQ rBu, _bo(oState); \
+ MOVQ rBi, _be(oState); \
+ B_RBI_RCE; \
+ \
+ /* Result g */ \
+ MOVQ _gu(iState), rBe; \
+ XORQ rDu, rBe; \
+ MOVQ _ka(iState), rBi; \
+ ROLQ $20, rBe; \
+ XORQ rDa, rBi; \
+ ROLQ $3, rBi; \
+ MOVQ _bo(iState), rBa; \
+ MOVQ rBe, rT1; \
+ ORQ rBi, rT1; \
+ XORQ rDo, rBa; \
+ MOVQ _me(iState), rBo; \
+ MOVQ _si(iState), rBu; \
+ ROLQ $28, rBa; \
+ XORQ rBa, rT1; \
+ MOVQ rT1, _ga(oState); \
+ G_RT1_RCA; \
+ \
+ XORQ rDe, rBo; \
+ ROLQ $45, rBo; \
+ MOVQ rBi, rT1; \
+ ANDQ rBo, rT1; \
+ XORQ rBe, rT1; \
+ MOVQ rT1, _ge(oState); \
+ G_RT1_RCE; \
+ \
+ XORQ rDi, rBu; \
+ ROLQ $61, rBu; \
+ MOVQ rBu, rT1; \
+ ORQ rBa, rT1; \
+ XORQ rBo, rT1; \
+ MOVQ rT1, _go(oState); \
+ \
+ ANDQ rBe, rBa; \
+ XORQ rBu, rBa; \
+ MOVQ rBa, _gu(oState); \
+ NOTQ rBu; \
+ G_RBA_RCU; \
+ \
+ ORQ rBu, rBo; \
+ XORQ rBi, rBo; \
+ MOVQ rBo, _gi(oState); \
+ \
+ /* Result k */ \
+ MOVQ _be(iState), rBa; \
+ MOVQ _gi(iState), rBe; \
+ MOVQ _ko(iState), rBi; \
+ MOVQ _mu(iState), rBo; \
+ MOVQ _sa(iState), rBu; \
+ XORQ rDi, rBe; \
+ ROLQ $6, rBe; \
+ XORQ rDo, rBi; \
+ ROLQ $25, rBi; \
+ MOVQ rBe, rT1; \
+ ORQ rBi, rT1; \
+ XORQ rDe, rBa; \
+ ROLQ $1, rBa; \
+ XORQ rBa, rT1; \
+ MOVQ rT1, _ka(oState); \
+ K_RT1_RCA; \
+ \
+ XORQ rDu, rBo; \
+ ROLQ $8, rBo; \
+ MOVQ rBi, rT1; \
+ ANDQ rBo, rT1; \
+ XORQ rBe, rT1; \
+ MOVQ rT1, _ke(oState); \
+ K_RT1_RCE; \
+ \
+ XORQ rDa, rBu; \
+ ROLQ $18, rBu; \
+ NOTQ rBo; \
+ MOVQ rBo, rT1; \
+ ANDQ rBu, rT1; \
+ XORQ rBi, rT1; \
+ MOVQ rT1, _ki(oState); \
+ \
+ MOVQ rBu, rT1; \
+ ORQ rBa, rT1; \
+ XORQ rBo, rT1; \
+ MOVQ rT1, _ko(oState); \
+ \
+ ANDQ rBe, rBa; \
+ XORQ rBu, rBa; \
+ MOVQ rBa, _ku(oState); \
+ K_RBA_RCU; \
+ \
+ /* Result m */ \
+ MOVQ _ga(iState), rBe; \
+ XORQ rDa, rBe; \
+ MOVQ _ke(iState), rBi; \
+ ROLQ $36, rBe; \
+ XORQ rDe, rBi; \
+ MOVQ _bu(iState), rBa; \
+ ROLQ $10, rBi; \
+ MOVQ rBe, rT1; \
+ MOVQ _mi(iState), rBo; \
+ ANDQ rBi, rT1; \
+ XORQ rDu, rBa; \
+ MOVQ _so(iState), rBu; \
+ ROLQ $27, rBa; \
+ XORQ rBa, rT1; \
+ MOVQ rT1, _ma(oState); \
+ M_RT1_RCA; \
+ \
+ XORQ rDi, rBo; \
+ ROLQ $15, rBo; \
+ MOVQ rBi, rT1; \
+ ORQ rBo, rT1; \
+ XORQ rBe, rT1; \
+ MOVQ rT1, _me(oState); \
+ M_RT1_RCE; \
+ \
+ XORQ rDo, rBu; \
+ ROLQ $56, rBu; \
+ NOTQ rBo; \
+ MOVQ rBo, rT1; \
+ ORQ rBu, rT1; \
+ XORQ rBi, rT1; \
+ MOVQ rT1, _mi(oState); \
+ \
+ ORQ rBa, rBe; \
+ XORQ rBu, rBe; \
+ MOVQ rBe, _mu(oState); \
+ \
+ ANDQ rBa, rBu; \
+ XORQ rBo, rBu; \
+ MOVQ rBu, _mo(oState); \
+ M_RBE_RCU; \
+ \
+ /* Result s */ \
+ MOVQ _bi(iState), rBa; \
+ MOVQ _go(iState), rBe; \
+ MOVQ _ku(iState), rBi; \
+ XORQ rDi, rBa; \
+ MOVQ _ma(iState), rBo; \
+ ROLQ $62, rBa; \
+ XORQ rDo, rBe; \
+ MOVQ _se(iState), rBu; \
+ ROLQ $55, rBe; \
+ \
+ XORQ rDu, rBi; \
+ MOVQ rBa, rDu; \
+ XORQ rDe, rBu; \
+ ROLQ $2, rBu; \
+ ANDQ rBe, rDu; \
+ XORQ rBu, rDu; \
+ MOVQ rDu, _su(oState); \
+ \
+ ROLQ $39, rBi; \
+ S_RDU_RCU; \
+ NOTQ rBe; \
+ XORQ rDa, rBo; \
+ MOVQ rBe, rDa; \
+ ANDQ rBi, rDa; \
+ XORQ rBa, rDa; \
+ MOVQ rDa, _sa(oState); \
+ S_RDA_RCA; \
+ \
+ ROLQ $41, rBo; \
+ MOVQ rBi, rDe; \
+ ORQ rBo, rDe; \
+ XORQ rBe, rDe; \
+ MOVQ rDe, _se(oState); \
+ S_RDE_RCE; \
+ \
+ MOVQ rBo, rDi; \
+ MOVQ rBu, rDo; \
+ ANDQ rBu, rDi; \
+ ORQ rBa, rDo; \
+ XORQ rBi, rDi; \
+ XORQ rBo, rDo; \
+ MOVQ rDi, _si(oState); \
+ MOVQ rDo, _so(oState) \
+
+// func keccakF1600(state *[25]uint64)
+TEXT ·keccakF1600(SB), 0, $200-8
+ MOVQ state+0(FP), rpState
+
+ // Convert the user state into an internal state
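+	// (The six lanes negated below are kept bit-complemented while the
+	// permutation runs; this lane-complementing transform lets the chi
+	// step get by with fewer NOT instructions.)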
+ NOTQ _be(rpState)
+ NOTQ _bi(rpState)
+ NOTQ _go(rpState)
+ NOTQ _ki(rpState)
+ NOTQ _mi(rpState)
+ NOTQ _sa(rpState)
+
+ // Execute the KeccakF permutation
+ MOVQ _ba(rpState), rCa
+ MOVQ _be(rpState), rCe
+ MOVQ _bu(rpState), rCu
+
+ XORQ _ga(rpState), rCa
+ XORQ _ge(rpState), rCe
+ XORQ _gu(rpState), rCu
+
+ XORQ _ka(rpState), rCa
+ XORQ _ke(rpState), rCe
+ XORQ _ku(rpState), rCu
+
+ XORQ _ma(rpState), rCa
+ XORQ _me(rpState), rCe
+ XORQ _mu(rpState), rCu
+
+ XORQ _sa(rpState), rCa
+ XORQ _se(rpState), rCe
+ MOVQ _si(rpState), rDi
+ MOVQ _so(rpState), rDo
+ XORQ _su(rpState), rCu
+
+ mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP)
+
+ // Revert the internal state to the user state
+ NOTQ _be(rpState)
+ NOTQ _bi(rpState)
+ NOTQ _go(rpState)
+ NOTQ _ki(rpState)
+ NOTQ _mi(rpState)
+ NOTQ _sa(rpState)
+
+ RET
diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go
new file mode 100644
index 000000000..8b4453aac
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/register.go
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.4
+// +build go1.4
+
+package sha3
+
+import (
+ "crypto"
+)
+
+func init() {
+ crypto.RegisterHash(crypto.SHA3_224, New224)
+ crypto.RegisterHash(crypto.SHA3_256, New256)
+ crypto.RegisterHash(crypto.SHA3_384, New384)
+ crypto.RegisterHash(crypto.SHA3_512, New512)
+}
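Thanks to this init function, a program only needs a blank import of the package to reach SHA-3 through the standard library's crypto.Hash dispatch. A small usage sketch:

package main

import (
	"crypto"
	"fmt"

	_ "golang.org/x/crypto/sha3" // blank import runs the init above
)

func main() {
	h := crypto.SHA3_256.New() // resolved via crypto.RegisterHash
	h.Write([]byte("The quick brown fox"))
	fmt.Printf("sha3-256: %x\n", h.Sum(nil))
}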
diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go
new file mode 100644
index 000000000..fa182beb4
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/sha3.go
@@ -0,0 +1,193 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// spongeDirection indicates the direction bytes are flowing through the sponge.
+type spongeDirection int
+
+const (
+ // spongeAbsorbing indicates that the sponge is absorbing input.
+ spongeAbsorbing spongeDirection = iota
+ // spongeSqueezing indicates that the sponge is being squeezed.
+ spongeSqueezing
+)
+
+const (
+	// maxRate is the maximum size of the internal buffer. SHAKE-128
+	// currently needs the largest buffer (168 bytes).
+ maxRate = 168
+)
+
+type state struct {
+ // Generic sponge components.
+ a [25]uint64 // main state of the hash
+ buf []byte // points into storage
+ rate int // the number of bytes of state to use
+
+ // dsbyte contains the "domain separation" bits and the first bit of
+ // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
+ // SHA-3 and SHAKE functions by appending bitstrings to the message.
+ // Using a little-endian bit-ordering convention, these are "01" for SHA-3
+ // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
+ // padding rule from section 5.1 is applied to pad the message to a multiple
+ // of the rate, which involves adding a "1" bit, zero or more "0" bits, and
+ // a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
+ // giving 00000110b (0x06) and 00011111b (0x1f).
+ // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
+ // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
+ // Extendable-Output Functions (May 2014)"
+ dsbyte byte
+
+ storage storageBuf
+
+ // Specific to SHA-3 and SHAKE.
+ outputLen int // the default output size in bytes
+ state spongeDirection // whether the sponge is absorbing or squeezing
+}
+
+// BlockSize returns the rate of the sponge underlying this hash function.
+func (d *state) BlockSize() int { return d.rate }
+
+// Size returns the output size of the hash function in bytes.
+func (d *state) Size() int { return d.outputLen }
+
+// Reset clears the internal state by zeroing the sponge state and
+// the byte buffer, and setting the sponge direction to absorbing.
+func (d *state) Reset() {
+ // Zero the permutation's state.
+ for i := range d.a {
+ d.a[i] = 0
+ }
+ d.state = spongeAbsorbing
+ d.buf = d.storage.asBytes()[:0]
+}
+
+func (d *state) clone() *state {
+ ret := *d
+ if ret.state == spongeAbsorbing {
+ ret.buf = ret.storage.asBytes()[:len(ret.buf)]
+ } else {
+ ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate]
+ }
+
+ return &ret
+}
+
+// permute applies the KeccakF-1600 permutation. It handles
+// any input-output buffering.
+func (d *state) permute() {
+ switch d.state {
+ case spongeAbsorbing:
+ // If we're absorbing, we need to xor the input into the state
+ // before applying the permutation.
+ xorIn(d, d.buf)
+ d.buf = d.storage.asBytes()[:0]
+ keccakF1600(&d.a)
+ case spongeSqueezing:
+ // If we're squeezing, we need to apply the permutation before
+ // copying more output.
+ keccakF1600(&d.a)
+ d.buf = d.storage.asBytes()[:d.rate]
+ copyOut(d, d.buf)
+ }
+}
+
+// padAndPermute appends the domain separation bits in dsbyte, applies
+// the multi-bitrate 10..1 padding rule, and permutes the state.
+func (d *state) padAndPermute(dsbyte byte) {
+ if d.buf == nil {
+ d.buf = d.storage.asBytes()[:0]
+ }
+ // Pad with this instance's domain-separator bits. We know that there's
+ // at least one byte of space in d.buf because, if it were full,
+ // permute would have been called to empty it. dsbyte also contains the
+ // first one bit for the padding. See the comment in the state struct.
+ d.buf = append(d.buf, dsbyte)
+ zerosStart := len(d.buf)
+ d.buf = d.storage.asBytes()[:d.rate]
+ for i := zerosStart; i < d.rate; i++ {
+ d.buf[i] = 0
+ }
+ // This adds the final one bit for the padding. Because of the way that
+ // bits are numbered from the LSB upwards, the final bit is the MSB of
+ // the last byte.
+ d.buf[d.rate-1] ^= 0x80
+ // Apply the permutation
+ d.permute()
+ d.state = spongeSqueezing
+ d.buf = d.storage.asBytes()[:d.rate]
+ copyOut(d, d.buf)
+}
+
+// Write absorbs more data into the hash's state. It panics if more
+// data is written after output has been read from it.
+func (d *state) Write(p []byte) (written int, err error) {
+ if d.state != spongeAbsorbing {
+ panic("sha3: write to sponge after read")
+ }
+ if d.buf == nil {
+ d.buf = d.storage.asBytes()[:0]
+ }
+ written = len(p)
+
+ for len(p) > 0 {
+ if len(d.buf) == 0 && len(p) >= d.rate {
+ // The fast path; absorb a full "rate" bytes of input and apply the permutation.
+ xorIn(d, p[:d.rate])
+ p = p[d.rate:]
+ keccakF1600(&d.a)
+ } else {
+ // The slow path; buffer the input until we can fill the sponge, and then xor it in.
+ todo := d.rate - len(d.buf)
+ if todo > len(p) {
+ todo = len(p)
+ }
+ d.buf = append(d.buf, p[:todo]...)
+ p = p[todo:]
+
+ // If the sponge is full, apply the permutation.
+ if len(d.buf) == d.rate {
+ d.permute()
+ }
+ }
+ }
+
+ return
+}
+
+// Read squeezes an arbitrary number of bytes from the sponge.
+func (d *state) Read(out []byte) (n int, err error) {
+ // If we're still absorbing, pad and apply the permutation.
+ if d.state == spongeAbsorbing {
+ d.padAndPermute(d.dsbyte)
+ }
+
+ n = len(out)
+
+ // Now, do the squeezing.
+ for len(out) > 0 {
+ n := copy(out, d.buf)
+ d.buf = d.buf[n:]
+ out = out[n:]
+
+ // Apply the permutation if we've squeezed the sponge dry.
+ if len(d.buf) == 0 {
+ d.permute()
+ }
+ }
+
+ return
+}
+
+// Sum applies padding to the hash state and then squeezes out the desired
+// number of output bytes.
+func (d *state) Sum(in []byte) []byte {
+ // Make a copy of the original hash so that caller can keep writing
+ // and summing.
+ dup := d.clone()
+ hash := make([]byte, dup.outputLen)
+ dup.Read(hash)
+ return append(in, hash...)
+}
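Note that Sum squeezes a clone, so a caller can interleave Sum with further Writes. A brief sketch, assuming the package's New256 constructor (defined in hashes.go, outside this hunk):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	h := sha3.New256() // a *state with rate 136 and dsbyte 0x06
	h.Write([]byte("hello "))
	fmt.Printf("%x\n", h.Sum(nil)) // Sum reads from a clone...
	h.Write([]byte("world"))
	fmt.Printf("%x\n", h.Sum(nil)) // ...so absorbing can continue here
}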
diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go
new file mode 100644
index 000000000..63a3edb4c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go
@@ -0,0 +1,287 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+// +build gc,!purego
+
+package sha3
+
+// This file contains code for using the 'compute intermediate
+// message digest' (KIMD) and 'compute last message digest' (KLMD)
+// instructions to compute SHA-3 and SHAKE hashes on IBM Z.
+
+import (
+ "hash"
+
+ "golang.org/x/sys/cpu"
+)
+
+// codes represent 7-bit KIMD/KLMD function codes as defined in
+// the Principles of Operation.
+type code uint64
+
+const (
+ // function codes for KIMD/KLMD
+ sha3_224 code = 32
+ sha3_256 = 33
+ sha3_384 = 34
+ sha3_512 = 35
+ shake_128 = 36
+ shake_256 = 37
+ nopad = 0x100
+)
+
+// kimd is a wrapper for the 'compute intermediate message digest' instruction.
+// src must be a multiple of the rate for the given function code.
+//
+//go:noescape
+func kimd(function code, chain *[200]byte, src []byte)
+
+// klmd is a wrapper for the 'compute last message digest' instruction.
+// src padding is handled by the instruction.
+//
+//go:noescape
+func klmd(function code, chain *[200]byte, dst, src []byte)
+
+type asmState struct {
+ a [200]byte // 1600 bit state
+ buf []byte // care must be taken to ensure cap(buf) is a multiple of rate
+ rate int // equivalent to block size
+ storage [3072]byte // underlying storage for buf
+ outputLen int // output length if fixed, 0 if not
+ function code // KIMD/KLMD function code
+ state spongeDirection // whether the sponge is absorbing or squeezing
+}
+
+func newAsmState(function code) *asmState {
+ var s asmState
+ s.function = function
+ switch function {
+ case sha3_224:
+ s.rate = 144
+ s.outputLen = 28
+ case sha3_256:
+ s.rate = 136
+ s.outputLen = 32
+ case sha3_384:
+ s.rate = 104
+ s.outputLen = 48
+ case sha3_512:
+ s.rate = 72
+ s.outputLen = 64
+ case shake_128:
+ s.rate = 168
+ case shake_256:
+ s.rate = 136
+ default:
+ panic("sha3: unrecognized function code")
+ }
+
+ // limit s.buf size to a multiple of s.rate
+ s.resetBuf()
+ return &s
+}
+
+func (s *asmState) clone() *asmState {
+ c := *s
+ c.buf = c.storage[:len(s.buf):cap(s.buf)]
+ return &c
+}
+
+// copyIntoBuf copies b into buf. It will panic if there is not enough space to
+// store all of b.
+func (s *asmState) copyIntoBuf(b []byte) {
+ bufLen := len(s.buf)
+ s.buf = s.buf[:len(s.buf)+len(b)]
+ copy(s.buf[bufLen:], b)
+}
+
+// resetBuf points buf at storage, sets the length to 0 and sets cap to be a
+// multiple of the rate.
+func (s *asmState) resetBuf() {
+ max := (cap(s.storage) / s.rate) * s.rate
+ s.buf = s.storage[:0:max]
+}
+
+// Write (via the embedded io.Writer interface) adds more data to the running hash.
+// It never returns an error.
+func (s *asmState) Write(b []byte) (int, error) {
+ if s.state != spongeAbsorbing {
+ panic("sha3: write to sponge after read")
+ }
+ length := len(b)
+ for len(b) > 0 {
+ if len(s.buf) == 0 && len(b) >= cap(s.buf) {
+ // Hash the data directly and push any remaining bytes
+ // into the buffer.
+ remainder := len(b) % s.rate
+ kimd(s.function, &s.a, b[:len(b)-remainder])
+ if remainder != 0 {
+ s.copyIntoBuf(b[len(b)-remainder:])
+ }
+ return length, nil
+ }
+
+ if len(s.buf) == cap(s.buf) {
+ // flush the buffer
+ kimd(s.function, &s.a, s.buf)
+ s.buf = s.buf[:0]
+ }
+
+ // copy as much as we can into the buffer
+ n := len(b)
+ if len(b) > cap(s.buf)-len(s.buf) {
+ n = cap(s.buf) - len(s.buf)
+ }
+ s.copyIntoBuf(b[:n])
+ b = b[n:]
+ }
+ return length, nil
+}
+
+// Read squeezes an arbitrary number of bytes from the sponge.
+func (s *asmState) Read(out []byte) (n int, err error) {
+ n = len(out)
+
+ // need to pad if we were absorbing
+ if s.state == spongeAbsorbing {
+ s.state = spongeSqueezing
+
+ // write hash directly into out if possible
+ if len(out)%s.rate == 0 {
+ klmd(s.function, &s.a, out, s.buf) // len(out) may be 0
+ s.buf = s.buf[:0]
+ return
+ }
+
+ // write hash into buffer
+ max := cap(s.buf)
+ if max > len(out) {
+ max = (len(out)/s.rate)*s.rate + s.rate
+ }
+ klmd(s.function, &s.a, s.buf[:max], s.buf)
+ s.buf = s.buf[:max]
+ }
+
+ for len(out) > 0 {
+ // flush the buffer
+ if len(s.buf) != 0 {
+ c := copy(out, s.buf)
+ out = out[c:]
+ s.buf = s.buf[c:]
+ continue
+ }
+
+ // write hash directly into out if possible
+ if len(out)%s.rate == 0 {
+ klmd(s.function|nopad, &s.a, out, nil)
+ return
+ }
+
+ // write hash into buffer
+ s.resetBuf()
+ if cap(s.buf) > len(out) {
+ s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate]
+ }
+ klmd(s.function|nopad, &s.a, s.buf, nil)
+ }
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (s *asmState) Sum(b []byte) []byte {
+ if s.outputLen == 0 {
+ panic("sha3: cannot call Sum on SHAKE functions")
+ }
+
+ // Copy the state to preserve the original.
+ a := s.a
+
+ // Hash the buffer. Note that we don't clear it because we
+ // aren't updating the state.
+ klmd(s.function, &a, nil, s.buf)
+ return append(b, a[:s.outputLen]...)
+}
+
+// Reset resets the Hash to its initial state.
+func (s *asmState) Reset() {
+ for i := range s.a {
+ s.a[i] = 0
+ }
+ s.resetBuf()
+ s.state = spongeAbsorbing
+}
+
+// Size returns the number of bytes Sum will return.
+func (s *asmState) Size() int {
+ return s.outputLen
+}
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (s *asmState) BlockSize() int {
+ return s.rate
+}
+
+// Clone returns a copy of the ShakeHash in its current state.
+func (s *asmState) Clone() ShakeHash {
+ return s.clone()
+}
+
+// new224Asm returns an assembly implementation of SHA3-224 if available,
+// otherwise it returns nil.
+func new224Asm() hash.Hash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(sha3_224)
+ }
+ return nil
+}
+
+// new256Asm returns an assembly implementation of SHA3-256 if available,
+// otherwise it returns nil.
+func new256Asm() hash.Hash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(sha3_256)
+ }
+ return nil
+}
+
+// new384Asm returns an assembly implementation of SHA3-384 if available,
+// otherwise it returns nil.
+func new384Asm() hash.Hash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(sha3_384)
+ }
+ return nil
+}
+
+// new512Asm returns an assembly implementation of SHA3-512 if available,
+// otherwise it returns nil.
+func new512Asm() hash.Hash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(sha3_512)
+ }
+ return nil
+}
+
+// newShake128Asm returns an assembly implementation of SHAKE-128 if available,
+// otherwise it returns nil.
+func newShake128Asm() ShakeHash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(shake_128)
+ }
+ return nil
+}
+
+// newShake256Asm returns an assembly implementation of SHAKE-256 if available,
+// otherwise it returns nil.
+func newShake256Asm() ShakeHash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(shake_256)
+ }
+ return nil
+}
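Each new*Asm constructor above returns nil when the CPU lacks the SHA-3 facility, which lets the exported constructors fall back cleanly. A hedged sketch of the dispatch shape (the real constructors live in hashes.go, not in this hunk):

package sha3

import "hash"

// new256Sketch prefers the CPACF-backed implementation when available
// and otherwise falls back to the generic sponge. Illustrative only;
// the package's actual New256 follows the same shape.
func new256Sketch() hash.Hash {
	if h := new256Asm(); h != nil {
		return h
	}
	return &state{rate: 136, outputLen: 32, dsbyte: 0x06}
}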
diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
new file mode 100644
index 000000000..a0e051b04
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
@@ -0,0 +1,34 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+// +build gc,!purego
+
+#include "textflag.h"
+
+// func kimd(function code, chain *[200]byte, src []byte)
+TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
+ MOVD function+0(FP), R0
+ MOVD chain+8(FP), R1
+ LMG src+16(FP), R2, R3 // R2=base, R3=len
+
+continue:
+ WORD $0xB93E0002 // KIMD --, R2
+ BVS continue // continue if interrupted
+ MOVD $0, R0 // reset R0 for pre-go1.8 compilers
+ RET
+
+// func klmd(function code, chain *[200]byte, dst, src []byte)
+TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
+ // TODO: SHAKE support
+ MOVD function+0(FP), R0
+ MOVD chain+8(FP), R1
+ LMG dst+16(FP), R2, R3 // R2=base, R3=len
+ LMG src+40(FP), R4, R5 // R4=base, R5=len
+
+continue:
+ WORD $0xB93F0024 // KLMD R2, R4
+ BVS continue // continue if interrupted
+ MOVD $0, R0 // reset R0 for pre-go1.8 compilers
+ RET
diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go
new file mode 100644
index 000000000..d7be2954a
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/shake.go
@@ -0,0 +1,173 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// This file defines the ShakeHash interface, and provides
+// functions for creating SHAKE and cSHAKE instances, as well as utility
+// functions for hashing bytes to arbitrary-length output.
+//
+// SHAKE implementation is based on FIPS PUB 202 [1]
+// cSHAKE implementation is based on NIST SP 800-185 [2]
+//
+// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
+// [2] https://doi.org/10.6028/NIST.SP.800-185
+
+import (
+ "encoding/binary"
+ "io"
+)
+
+// ShakeHash defines the interface to hash functions that
+// support arbitrary-length output.
+type ShakeHash interface {
+ // Write absorbs more data into the hash's state. It panics if input is
+ // written to it after output has been read from it.
+ io.Writer
+
+ // Read reads more output from the hash; reading affects the hash's
+ // state. (ShakeHash.Read is thus very different from Hash.Sum)
+ // It never returns an error.
+ io.Reader
+
+ // Clone returns a copy of the ShakeHash in its current state.
+ Clone() ShakeHash
+
+ // Reset resets the ShakeHash to its initial state.
+ Reset()
+}
+
+// cSHAKE specific context
+type cshakeState struct {
+ *state // SHA-3 state context and Read/Write operations
+
+	// initBlock is the cSHAKE-specific initialization set of bytes. It is
+	// initialized by the newCShake function and stores the concatenation of
+	// N followed by S, encoded by the method specified in section 3.3 of [2].
+	// It is kept so that Reset() can restore the context to its initial
+	// state.
+ initBlock []byte
+}
+
+// Consts for configuring initial SHA-3 state
+const (
+ dsbyteShake = 0x1f
+ dsbyteCShake = 0x04
+ rate128 = 168
+ rate256 = 136
+)
+
+func bytepad(input []byte, w int) []byte {
+ // leftEncode always returns max 9 bytes
+ buf := make([]byte, 0, 9+len(input)+w)
+ buf = append(buf, leftEncode(uint64(w))...)
+ buf = append(buf, input...)
+ padlen := w - (len(buf) % w)
+ return append(buf, make([]byte, padlen)...)
+}
+
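+// leftEncode implements left_encode from NIST SP 800-185. For example,
+// leftEncode(0) returns [0x01, 0x00] and leftEncode(168) returns
+// [0x01, 0xa8]: one byte giving the length of the encoding, followed by
+// the minimal big-endian encoding of the value itself.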
+func leftEncode(value uint64) []byte {
+ var b [9]byte
+ binary.BigEndian.PutUint64(b[1:], value)
+	// Trim all but the last leading zero byte
+ i := byte(1)
+ for i < 8 && b[i] == 0 {
+ i++
+ }
+ // Prepend number of encoded bytes
+ b[i-1] = 9 - i
+ return b[i-1:]
+}
+
+func newCShake(N, S []byte, rate int, dsbyte byte) ShakeHash {
+ c := cshakeState{state: &state{rate: rate, dsbyte: dsbyte}}
+
+ // leftEncode returns max 9 bytes
+ c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
+ c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...)
+ c.initBlock = append(c.initBlock, N...)
+ c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...)
+ c.initBlock = append(c.initBlock, S...)
+ c.Write(bytepad(c.initBlock, c.rate))
+ return &c
+}
+
+// Reset resets the hash to initial state.
+func (c *cshakeState) Reset() {
+ c.state.Reset()
+ c.Write(bytepad(c.initBlock, c.rate))
+}
+
+// Clone returns a copy of the cSHAKE context in its current state.
+func (c *cshakeState) Clone() ShakeHash {
+ b := make([]byte, len(c.initBlock))
+ copy(b, c.initBlock)
+ return &cshakeState{state: c.clone(), initBlock: b}
+}
+
+// Clone returns a copy of the SHAKE context in its current state.
+func (c *state) Clone() ShakeHash {
+ return c.clone()
+}
+
+// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
+// Its generic security strength is 128 bits against all attacks if at
+// least 32 bytes of its output are used.
+func NewShake128() ShakeHash {
+ if h := newShake128Asm(); h != nil {
+ return h
+ }
+ return &state{rate: rate128, dsbyte: dsbyteShake}
+}
+
+// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
+// Its generic security strength is 256 bits against all attacks if
+// at least 64 bytes of its output are used.
+func NewShake256() ShakeHash {
+ if h := newShake256Asm(); h != nil {
+ return h
+ }
+ return &state{rate: rate256, dsbyte: dsbyteShake}
+}
+
+// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
+// a customizable variant of SHAKE128.
+// N is used to define functions based on cSHAKE; it can be empty when plain
+// cSHAKE is desired. S is a customization byte string used for domain
+// separation: two cSHAKE computations on the same input with different S
+// yield unrelated outputs.
+// When N and S are both empty, this is equivalent to NewShake128.
+func NewCShake128(N, S []byte) ShakeHash {
+ if len(N) == 0 && len(S) == 0 {
+ return NewShake128()
+ }
+ return newCShake(N, S, rate128, dsbyteCShake)
+}
+
+// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
+// a customizable variant of SHAKE256.
+// N is used to define functions based on cSHAKE; it can be empty when plain
+// cSHAKE is desired. S is a customization byte string used for domain
+// separation: two cSHAKE computations on the same input with different S
+// yield unrelated outputs.
+// When N and S are both empty, this is equivalent to NewShake256.
+func NewCShake256(N, S []byte) ShakeHash {
+ if len(N) == 0 && len(S) == 0 {
+ return NewShake256()
+ }
+ return newCShake(N, S, rate256, dsbyteCShake)
+}
+
+// ShakeSum128 writes an arbitrary-length digest of data into hash.
+func ShakeSum128(hash, data []byte) {
+ h := NewShake128()
+ h.Write(data)
+ h.Read(hash)
+}
+
+// ShakeSum256 writes an arbitrary-length digest of data into hash.
+func ShakeSum256(hash, data []byte) {
+ h := NewShake256()
+ h.Write(data)
+ h.Read(hash)
+}
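The customization string gives cheap domain separation between protocol uses. A short sketch using the cSHAKE256 constructor above:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	out1 := make([]byte, 32)
	out2 := make([]byte, 32)

	// Same input, different customization strings: unrelated outputs.
	h1 := sha3.NewCShake256(nil, []byte("context A"))
	h1.Write([]byte("payload"))
	h1.Read(out1)

	h2 := sha3.NewCShake256(nil, []byte("context B"))
	h2.Write([]byte("payload"))
	h2.Read(out2)

	fmt.Printf("%x\n%x\n", out1, out2)
}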
diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go
new file mode 100644
index 000000000..5c0710ef9
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go
@@ -0,0 +1,20 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !gc || purego || !s390x
+// +build !gc purego !s390x
+
+package sha3
+
+// newShake128Asm returns an assembly implementation of SHAKE-128 if available,
+// otherwise it returns nil.
+func newShake128Asm() ShakeHash {
+ return nil
+}
+
+// newShake256Asm returns an assembly implementation of SHAKE-256 if available,
+// otherwise it returns nil.
+func newShake256Asm() ShakeHash {
+ return nil
+}
diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go
new file mode 100644
index 000000000..59c8eb941
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/xor.go
@@ -0,0 +1,24 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (!amd64 && !386 && !ppc64le) || purego
+// +build !amd64,!386,!ppc64le purego
+
+package sha3
+
+// A storageBuf is an aligned array of maxRate bytes.
+type storageBuf [maxRate]byte
+
+func (b *storageBuf) asBytes() *[maxRate]byte {
+ return (*[maxRate]byte)(b)
+}
+
+var (
+ xorIn = xorInGeneric
+ copyOut = copyOutGeneric
+ xorInUnaligned = xorInGeneric
+ copyOutUnaligned = copyOutGeneric
+)
+
+const xorImplementationUnaligned = "generic"
diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go
new file mode 100644
index 000000000..8d9477112
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/xor_generic.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+import "encoding/binary"
+
+// xorInGeneric xors the bytes in buf into the state; it
+// makes no non-portable assumptions about memory layout
+// or alignment.
+func xorInGeneric(d *state, buf []byte) {
+ n := len(buf) / 8
+
+ for i := 0; i < n; i++ {
+ a := binary.LittleEndian.Uint64(buf)
+ d.a[i] ^= a
+ buf = buf[8:]
+ }
+}
+
+// copyOutGeneric copies uint64s to a byte buffer.
+func copyOutGeneric(d *state, b []byte) {
+ for i := 0; len(b) >= 8; i++ {
+ binary.LittleEndian.PutUint64(b, d.a[i])
+ b = b[8:]
+ }
+}
diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go
new file mode 100644
index 000000000..1ce606246
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go
@@ -0,0 +1,68 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (amd64 || 386 || ppc64le) && !purego
+// +build amd64 386 ppc64le
+// +build !purego
+
+package sha3
+
+import "unsafe"
+
+// A storageBuf is an aligned array of maxRate bytes.
+type storageBuf [maxRate / 8]uint64
+
+func (b *storageBuf) asBytes() *[maxRate]byte {
+ return (*[maxRate]byte)(unsafe.Pointer(b))
+}
+
+// xorInUnaligned uses unaligned reads and writes to update d.a to contain d.a
+// XOR buf.
+func xorInUnaligned(d *state, buf []byte) {
+ n := len(buf)
+ bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8]
+ if n >= 72 {
+ d.a[0] ^= bw[0]
+ d.a[1] ^= bw[1]
+ d.a[2] ^= bw[2]
+ d.a[3] ^= bw[3]
+ d.a[4] ^= bw[4]
+ d.a[5] ^= bw[5]
+ d.a[6] ^= bw[6]
+ d.a[7] ^= bw[7]
+ d.a[8] ^= bw[8]
+ }
+ if n >= 104 {
+ d.a[9] ^= bw[9]
+ d.a[10] ^= bw[10]
+ d.a[11] ^= bw[11]
+ d.a[12] ^= bw[12]
+ }
+ if n >= 136 {
+ d.a[13] ^= bw[13]
+ d.a[14] ^= bw[14]
+ d.a[15] ^= bw[15]
+ d.a[16] ^= bw[16]
+ }
+ if n >= 144 {
+ d.a[17] ^= bw[17]
+ }
+ if n >= 168 {
+ d.a[18] ^= bw[18]
+ d.a[19] ^= bw[19]
+ d.a[20] ^= bw[20]
+ }
+}
+
+func copyOutUnaligned(d *state, buf []byte) {
+ ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
+ copy(buf, ab[:])
+}
+
+var (
+ xorIn = xorInUnaligned
+ copyOut = copyOutUnaligned
+)
+
+const xorImplementationUnaligned = "unaligned"
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
index a3c021d3f..cf66309c4 100644
--- a/vendor/golang.org/x/net/context/context.go
+++ b/vendor/golang.org/x/net/context/context.go
@@ -21,9 +21,9 @@
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
-// func DoSomething(ctx context.Context, arg Arg) error {
-// // ... use ctx ...
-// }
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
index 344bd1433..0a54bdbcc 100644
--- a/vendor/golang.org/x/net/context/go17.go
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -54,11 +54,11 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
-// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
-// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
-// defer cancel() // releases resources if slowOperation completes before timeout elapses
-// return slowOperation(ctx)
-// }
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
index 5270db5db..7b6b68511 100644
--- a/vendor/golang.org/x/net/context/pre_go17.go
+++ b/vendor/golang.org/x/net/context/pre_go17.go
@@ -264,11 +264,11 @@ func (c *timerCtx) cancel(removeFromParent bool, err error) {
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
-// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
-// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
-// defer cancel() // releases resources if slowOperation completes before timeout elapses
-// return slowOperation(ctx)
-// }
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go
index c79aa73f2..6e071e852 100644
--- a/vendor/golang.org/x/net/http/httpguts/httplex.go
+++ b/vendor/golang.org/x/net/http/httpguts/httplex.go
@@ -173,13 +173,15 @@ func tokenEqual(t1, t2 string) bool {
// isLWS reports whether b is linear white space, according
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
-// LWS = [CRLF] 1*( SP | HT )
+//
+// LWS = [CRLF] 1*( SP | HT )
func isLWS(b byte) bool { return b == ' ' || b == '\t' }
// isCTL reports whether b is a control byte, according
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
-// CTL = <any US-ASCII control character
-// (octets 0 - 31) and DEL (127)>
+//
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
func isCTL(b byte) bool {
const del = 0x7f // a CTL
return b < ' ' || b == del
@@ -189,12 +191,13 @@ func isCTL(b byte) bool {
// HTTP/2 imposes the additional restriction that uppercase ASCII
// letters are not allowed.
//
-// RFC 7230 says:
-// header-field = field-name ":" OWS field-value OWS
-// field-name = token
-// token = 1*tchar
-// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
-// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+// RFC 7230 says:
+//
+// header-field = field-name ":" OWS field-value OWS
+// field-name = token
+// token = 1*tchar
+// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
func ValidHeaderFieldName(v string) bool {
if len(v) == 0 {
return false
@@ -267,27 +270,28 @@ var validHostByte = [256]bool{
// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
//
-// message-header = field-name ":" [ field-value ]
-// field-value = *( field-content | LWS )
-// field-content = <the OCTETs making up the field-value
-// and consisting of either *TEXT or combinations
-// of token, separators, and quoted-string>
+// message-header = field-name ":" [ field-value ]
+// field-value = *( field-content | LWS )
+// field-content = <the OCTETs making up the field-value
+// and consisting of either *TEXT or combinations
+// of token, separators, and quoted-string>
//
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
//
-// TEXT = <any OCTET except CTLs,
-// but including LWS>
-// LWS = [CRLF] 1*( SP | HT )
-// CTL = <any US-ASCII control character
-// (octets 0 - 31) and DEL (127)>
+// TEXT = <any OCTET except CTLs,
+// but including LWS>
+// LWS = [CRLF] 1*( SP | HT )
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
//
// RFC 7230 says:
-// field-value = *( field-content / obs-fold )
-// obj-fold = N/A to http2, and deprecated
-// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
-// field-vchar = VCHAR / obs-text
-// obs-text = %x80-FF
-// VCHAR = "any visible [USASCII] character"
+//
+// field-value = *( field-content / obs-fold )
+//	obs-fold = N/A to http2, and deprecated
+// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+// field-vchar = VCHAR / obs-text
+// obs-text = %x80-FF
+// VCHAR = "any visible [USASCII] character"
//
// http2 further says: "Similarly, HTTP/2 allows header field values
// that are not valid. While most of the values that can be encoded
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
index c936843ea..780968d6c 100644
--- a/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -139,7 +139,6 @@ func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *d
func (c *dialCall) dial(ctx context.Context, addr string) {
const singleUse = false // shared conn
c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse)
- close(c.done)
c.p.mu.Lock()
delete(c.p.dialing, addr)
@@ -147,6 +146,8 @@ func (c *dialCall) dial(ctx context.Context, addr string) {
c.p.addConnLocked(addr, c.res)
}
c.p.mu.Unlock()
+
+ close(c.done)
}
// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
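The reordering above is the whole fix: close(c.done) used to run before the pool's dialing map and connection cache were updated, so a goroutine woken by the channel could consult the pool and miss the brand-new connection. A hedged sketch of the general pattern, with illustrative names rather than the package's actual types:

package pool

import (
	"net"
	"sync"
)

// dialCallSketch mirrors the fixed ordering: record the result and update
// the shared map under the lock first, and close the channel last, so a
// waiter woken by <-done always sees fully updated state.
type dialCallSketch struct {
	res  net.Conn
	err  error
	done chan struct{}
}

func (c *dialCallSketch) finish(mu *sync.Mutex, conns map[string]net.Conn, addr string, conn net.Conn, err error) {
	c.res, c.err = conn, err
	mu.Lock()
	if err == nil {
		conns[addr] = conn
	}
	mu.Unlock()
	close(c.done) // signal only after the pool state is consistent
}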
diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go
index 2663e5d28..f2067dabc 100644
--- a/vendor/golang.org/x/net/http2/errors.go
+++ b/vendor/golang.org/x/net/http2/errors.go
@@ -136,7 +136,7 @@ func (e headerFieldNameError) Error() string {
type headerFieldValueError string
func (e headerFieldValueError) Error() string {
- return fmt.Sprintf("invalid header field value %q", string(e))
+ return fmt.Sprintf("invalid header field value for %q", string(e))
}
var (
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 96a747905..0178647ee 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -1532,7 +1532,8 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
}
if !httpguts.ValidHeaderFieldValue(hf.Value) {
- invalid = headerFieldValueError(hf.Value)
+ // Don't include the value in the error, because it may be sensitive.
+ invalid = headerFieldValueError(hf.Name)
}
isPseudo := strings.HasPrefix(hf.Name, ":")
if isPseudo {
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
index fe0b84ccd..20d083a71 100644
--- a/vendor/golang.org/x/net/http2/hpack/huffman.go
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -169,25 +169,50 @@ func buildRootHuffmanNode() {
// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
// and returns the extended buffer.
func AppendHuffmanString(dst []byte, s string) []byte {
- rembits := uint8(8)
-
+	// This relies on the maximum huffman code length being 30 (see the
+	// huffmanCodeLen array in tables.go), so a uint64 buffer with fewer
+	// than 32 valid bits can always accommodate another huffman code.
+ var (
+ x uint64 // buffer
+		n uint   // number of valid bits present in x
+ )
for i := 0; i < len(s); i++ {
- if rembits == 8 {
- dst = append(dst, 0)
+ c := s[i]
+ n += uint(huffmanCodeLen[c])
+ x <<= huffmanCodeLen[c] % 64
+ x |= uint64(huffmanCodes[c])
+ if n >= 32 {
+ n %= 32 // Normally would be -= 32 but %= 32 informs compiler 0 <= n <= 31 for upcoming shift
+ y := uint32(x >> n) // Compiler doesn't combine memory writes if y isn't uint32
+ dst = append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y))
}
- dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
}
-
- if rembits < 8 {
- // special EOS symbol
- code := uint32(0x3fffffff)
- nbits := uint8(30)
-
- t := uint8(code >> (nbits - rembits))
- dst[len(dst)-1] |= t
+ // Add padding bits if necessary
+ if over := n % 8; over > 0 {
+ const (
+ eosCode = 0x3fffffff
+ eosNBits = 30
+ eosPadByte = eosCode >> (eosNBits - 8)
+ )
+ pad := 8 - over
+ x = (x << pad) | (eosPadByte >> over)
+ n += pad // 8 now divides into n exactly
}
-
- return dst
+ // n in (0, 8, 16, 24, 32)
+ switch n / 8 {
+ case 0:
+ return dst
+ case 1:
+ return append(dst, byte(x))
+ case 2:
+ y := uint16(x)
+ return append(dst, byte(y>>8), byte(y))
+ case 3:
+ y := uint16(x >> 8)
+ return append(dst, byte(y>>8), byte(y), byte(x))
+ }
+ // case 4:
+ y := uint32(x)
+ return append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y))
}
// HuffmanEncodeLength returns the number of bytes required to encode
@@ -199,35 +224,3 @@ func HuffmanEncodeLength(s string) uint64 {
}
return (n + 7) / 8
}
-
-// appendByteToHuffmanCode appends Huffman code for c to dst and
-// returns the extended buffer and the remaining bits in the last
-// element. The appending is not byte aligned and the remaining bits
-// in the last element of dst is given in rembits.
-func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
- code := huffmanCodes[c]
- nbits := huffmanCodeLen[c]
-
- for {
- if rembits > nbits {
- t := uint8(code << (rembits - nbits))
- dst[len(dst)-1] |= t
- rembits -= nbits
- break
- }
-
- t := uint8(code >> (nbits - rembits))
- dst[len(dst)-1] |= t
-
- nbits -= rembits
- rembits = 8
-
- if nbits == 0 {
- break
- }
-
- dst = append(dst, 0)
- }
-
- return dst, rembits
-}
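The rewritten encoder replaces per-code byte fiddling with a 64-bit bit buffer that is flushed four bytes at a time. A self-contained toy version of the same scheme, with an invented code table (the real HPACK codes live in tables.go):

package main

import "fmt"

// appendCodes packs variable-length codes MSB-first into dst, flushing
// four bytes whenever at least 32 bits are pending, then pads the final
// partial byte with ones (HPACK pads with the all-ones EOS prefix).
func appendCodes(dst []byte, codes []uint32, lengths []uint) []byte {
	var x uint64 // bit buffer, most recent code in the low bits
	var n uint   // number of valid bits in x
	for i, c := range codes {
		n += lengths[i]
		x = x<<lengths[i] | uint64(c)
		if n >= 32 {
			n -= 32
			y := uint32(x >> n)
			dst = append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y))
		}
	}
	if over := n % 8; over > 0 {
		pad := 8 - over
		x = x<<pad | (1<<pad - 1)
		n += pad
	}
	for ; n >= 8; n -= 8 {
		dst = append(dst, byte(x>>(n-8)))
	}
	return dst
}

func main() {
	// Two 5-bit codes and one 30-bit code: 40 bits, so no padding needed.
	out := appendCodes(nil, []uint32{0x0a, 0x15, 0x3ffffffe}, []uint{5, 5, 30})
	fmt.Printf("%08b\n", out)
}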
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 5571ccfd2..479ba4b2b 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -13,7 +13,6 @@
// See https://http2.github.io/ for more information on HTTP/2.
//
// See https://http2.golang.org/ for a test server running this code.
-//
package http2 // import "golang.org/x/net/http2"
import (
@@ -176,10 +175,11 @@ func (s SettingID) String() string {
// name (key). See httpguts.ValidHeaderName for the base rules.
//
// Further, http2 says:
-// "Just as in HTTP/1.x, header field names are strings of ASCII
-// characters that are compared in a case-insensitive
-// fashion. However, header field names MUST be converted to
-// lowercase prior to their encoding in HTTP/2. "
+//
+// "Just as in HTTP/1.x, header field names are strings of ASCII
+// characters that are compared in a case-insensitive
+// fashion. However, header field names MUST be converted to
+// lowercase prior to their encoding in HTTP/2. "
func validWireHeaderFieldName(v string) bool {
if len(v) == 0 {
return false
@@ -365,8 +365,8 @@ func (s *sorter) SortStrings(ss []string) {
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
-// *) a non-empty string starting with '/'
-// *) the string '*', for OPTIONS requests.
+// - a non-empty string starting with '/'
+// - the string '*', for OPTIONS requests.
//
// For now this is only used a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index e644d9b2f..47524a61a 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -315,6 +315,20 @@ type ServeConnOpts struct {
// requests. If nil, BaseConfig.Handler is used. If BaseConfig
// or BaseConfig.Handler is nil, http.DefaultServeMux is used.
Handler http.Handler
+
+ // UpgradeRequest is an initial request received on a connection
+ // undergoing an h2c upgrade. The request body must have been
+ // completely read from the connection before calling ServeConn,
+ // and the 101 Switching Protocols response written.
+ UpgradeRequest *http.Request
+
+ // Settings is the decoded contents of the HTTP2-Settings header
+ // in an h2c upgrade request.
+ Settings []byte
+
+ // SawClientPreface is set if the HTTP/2 connection preface
+ // has already been read from the connection.
+ SawClientPreface bool
}
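Taken together, these fields let a server adopt a connection on which an HTTP/1.1 Upgrade: h2c handshake has already happened. A hedged sketch of how they might be wired up (the supported entry point is the golang.org/x/net/http2/h2c package; names here are illustrative):

package h2csketch

import (
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

// serveUpgraded hands an h2c-upgraded connection to the HTTP/2 server.
// conn must already have had the upgrade request body consumed and the
// 101 Switching Protocols response written.
func serveUpgraded(s *http2.Server, conn net.Conn, req *http.Request, settings []byte) {
	s.ServeConn(conn, &http2.ServeConnOpts{
		UpgradeRequest: req,      // replayed to the handler as stream 1
		Settings:       settings, // decoded HTTP2-Settings header payload
		Handler:        http.DefaultServeMux,
	})
}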
func (o *ServeConnOpts) context() context.Context {
@@ -383,6 +397,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
headerTableSize: initialHeaderTableSize,
serveG: newGoroutineLock(),
pushEnabled: true,
+ sawClientPreface: opts.SawClientPreface,
}
s.state.registerConn(sc)
@@ -400,7 +415,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
if s.NewWriteScheduler != nil {
sc.writeSched = s.NewWriteScheduler()
} else {
- sc.writeSched = NewRandomWriteScheduler()
+ sc.writeSched = NewPriorityWriteScheduler(nil)
}
// These start at the RFC-specified defaults. If there is a higher
@@ -465,9 +480,27 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
}
}
+ if opts.Settings != nil {
+ fr := &SettingsFrame{
+ FrameHeader: FrameHeader{valid: true},
+ p: opts.Settings,
+ }
+ if err := fr.ForeachSetting(sc.processSetting); err != nil {
+ sc.rejectConn(ErrCodeProtocol, "invalid settings")
+ return
+ }
+ opts.Settings = nil
+ }
+
if hook := testHookGetServerConn; hook != nil {
hook(sc)
}
+
+ if opts.UpgradeRequest != nil {
+ sc.upgradeRequest(opts.UpgradeRequest)
+ opts.UpgradeRequest = nil
+ }
+
sc.serve()
}
@@ -512,6 +545,7 @@ type serverConn struct {
// Everything following is owned by the serve loop; use serveG.check():
serveG goroutineLock // used to verify funcs are on serve()
pushEnabled bool
+ sawClientPreface bool // preface has already been read, used in h2c upgrade
sawFirstSettings bool // got the initial SETTINGS frame after the preface
needToSendSettingsAck bool
unackedSettings int // how many SETTINGS have we sent without ACKs?
@@ -974,6 +1008,9 @@ var errPrefaceTimeout = errors.New("timeout waiting for client preface")
// returns errPrefaceTimeout on timeout, or an error if the greeting
// is invalid.
func (sc *serverConn) readPreface() error {
+ if sc.sawClientPreface {
+ return nil
+ }
errc := make(chan error, 1)
go func() {
// Read the client preface
@@ -1915,6 +1952,26 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
return nil
}
+func (sc *serverConn) upgradeRequest(req *http.Request) {
+ sc.serveG.check()
+ id := uint32(1)
+ sc.maxClientStreamID = id
+ st := sc.newStream(id, 0, stateHalfClosedRemote)
+ st.reqTrailer = req.Trailer
+ if st.reqTrailer != nil {
+ st.trailer = make(http.Header)
+ }
+ rw := sc.newResponseWriter(st, req)
+
+ // Disable any read deadline set by the net/http package
+ // prior to the upgrade.
+ if sc.hs.ReadTimeout != 0 {
+ sc.conn.SetReadDeadline(time.Time{})
+ }
+
+ go sc.runHandler(rw, req, sc.handler.ServeHTTP)
+}
+
func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
sc := st.sc
sc.serveG.check()
@@ -2145,6 +2202,11 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
}
req = req.WithContext(st.ctx)
+ rw := sc.newResponseWriter(st, req)
+ return rw, req, nil
+}
+
+func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter {
rws := responseWriterStatePool.Get().(*responseWriterState)
bwSave := rws.bw
*rws = responseWriterState{} // zero all the fields
@@ -2153,10 +2215,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
rws.bw.Reset(chunkWriter{rws})
rws.stream = st
rws.req = req
- rws.body = body
-
- rw := &responseWriter{rws: rws}
- return rw, req, nil
+ return &responseWriter{rws: rws}
}
// Run on its own goroutine.
@@ -2316,17 +2375,18 @@ type requestBody struct {
_ incomparable
stream *stream
conn *serverConn
- closed bool // for use by Close only
- sawEOF bool // for use by Read only
- pipe *pipe // non-nil if we have a HTTP entity message body
- needsContinue bool // need to send a 100-continue
+ closeOnce sync.Once // for use by Close only
+ sawEOF bool // for use by Read only
+ pipe *pipe // non-nil if we have a HTTP entity message body
+ needsContinue bool // need to send a 100-continue
}
func (b *requestBody) Close() error {
- if b.pipe != nil && !b.closed {
- b.pipe.BreakWithError(errClosedBody)
- }
- b.closed = true
+ b.closeOnce.Do(func() {
+ if b.pipe != nil {
+ b.pipe.BreakWithError(errClosedBody)
+ }
+ })
return nil
}
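Swapping the closed flag for sync.Once makes Close idempotent and safe against concurrent calls from the handler and the serve loop. A self-contained sketch of the same pattern:

    package main

    import (
        "fmt"
        "sync"
    )

    // onceCloser runs its cleanup exactly once, however many
    // goroutines call Close concurrently.
    type onceCloser struct {
        once sync.Once
        fn   func()
    }

    func (c *onceCloser) Close() error {
        c.once.Do(c.fn)
        return nil
    }

    func main() {
        c := &onceCloser{fn: func() { fmt.Println("closed once") }}
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            wg.Add(1)
            go func() { defer wg.Done(); c.Close() }()
        }
        wg.Wait() // prints "closed once" exactly once
    }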
@@ -2370,7 +2430,6 @@ type responseWriterState struct {
// immutable within a request:
stream *stream
req *http.Request
- body *requestBody // to close at end of request, if DATA frames didn't
conn *serverConn
// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
@@ -2546,8 +2605,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
-// https://golang.org/pkg/net/http/#ResponseWriter
-// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+//
+// https://golang.org/pkg/net/http/#ResponseWriter
+// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
const TrailerPrefix = "Trailer:"
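For a trailer whose name only becomes known while writing the body, the prefix is used like this (a hedged sketch; the Checksum trailer name is illustrative):

    // imports assumed: io, net/http, golang.org/x/net/http2
    func handler(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
        io.WriteString(w, "response body")
        // Promoted to the HTTP/2 trailer "Checksum" when the handler returns.
        w.Header().Set(http2.TrailerPrefix+"Checksum", "sha256:...")
    }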
// promoteUndeclaredTrailers permits http.Handlers to set trailers
@@ -2643,8 +2703,7 @@ func checkWriteHeaderCode(code int) {
// Issue 22880: require valid WriteHeader status codes.
// For now we only enforce that it's three digits.
// In the future we might block things over 599 (600 and above aren't defined
- // at http://httpwg.org/specs/rfc7231.html#status.codes)
- // and we might block under 200 (once we have more mature 1xx support).
+ // at http://httpwg.org/specs/rfc7231.html#status.codes).
// But for now any three digits.
//
// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
@@ -2665,13 +2724,41 @@ func (w *responseWriter) WriteHeader(code int) {
}
func (rws *responseWriterState) writeHeader(code int) {
- if !rws.wroteHeader {
- checkWriteHeaderCode(code)
- rws.wroteHeader = true
- rws.status = code
- if len(rws.handlerHeader) > 0 {
- rws.snapHeader = cloneHeader(rws.handlerHeader)
+ if rws.wroteHeader {
+ return
+ }
+
+ checkWriteHeaderCode(code)
+
+ // Handle informational headers
+ if code >= 100 && code <= 199 {
+ // Per RFC 8297 we must not clear the current header map
+ h := rws.handlerHeader
+
+ _, cl := h["Content-Length"]
+ _, te := h["Transfer-Encoding"]
+ if cl || te {
+ h = h.Clone()
+ h.Del("Content-Length")
+ h.Del("Transfer-Encoding")
+ }
+
+ if rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ httpResCode: code,
+ h: h,
+ endStream: rws.handlerDone && !rws.hasTrailers(),
+ }) != nil {
+ rws.dirty = true
}
+
+ return
+ }
+
+ rws.wroteHeader = true
+ rws.status = code
+ if len(rws.handlerHeader) > 0 {
+ rws.snapHeader = cloneHeader(rws.handlerHeader)
}
}
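The new 1xx branch is what allows handlers to emit interim responses such as 103 Early Hints (RFC 8297) without losing the header map for the final response. A hedged handler sketch (the Link value is illustrative):

    // imports assumed: io, net/http
    func handler(w http.ResponseWriter, r *http.Request) {
        w.Header().Add("Link", "</style.css>; rel=preload; as=style")
        w.WriteHeader(http.StatusEarlyHints) // interim response; header map is kept
        w.Header().Set("Content-Type", "text/html")
        w.WriteHeader(http.StatusOK) // final response
        io.WriteString(w, "<!doctype html><p>hello")
    }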
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 4f0989763..4ded4dfd5 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -16,7 +16,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"log"
"math"
mathrand "math/rand"
@@ -501,12 +500,14 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
if req, err = shouldRetryRequest(req, err); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
+ t.vlogf("RoundTrip retrying after failure: %v", err)
continue
}
backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64())
select {
case <-time.After(time.Second * time.Duration(backoff)):
+ t.vlogf("RoundTrip retrying after failure: %v", err)
continue
case <-req.Context().Done():
err = req.Context().Err()
@@ -732,10 +733,13 @@ func (cc *ClientConn) healthCheck() {
// trigger the healthCheck again if there is no frame received.
ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
defer cancel()
+ cc.vlogf("http2: Transport sending health check")
err := cc.Ping(ctx)
if err != nil {
+ cc.vlogf("http2: Transport health check failure: %v", err)
cc.closeForLostPing()
- return
+ } else {
+ cc.vlogf("http2: Transport health check success")
}
}
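The health check, and with it the new vlogf lines, only runs when the transport is configured to ping idle connections; the verbose output itself is gated on GODEBUG=http2debug=1. A configuration sketch:

    // imports assumed: net/http, time, golang.org/x/net/http2
    t := &http2.Transport{
        ReadIdleTimeout: 30 * time.Second, // send a PING after 30s without frames
        PingTimeout:     10 * time.Second, // close the conn if the PING is unanswered
    }
    client := &http.Client{Transport: t}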
@@ -1765,7 +1769,8 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
- return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
+ // Don't include the value in the error, because it may be sensitive.
+ return nil, fmt.Errorf("invalid HTTP header value for header %q", k)
}
}
}
@@ -2898,7 +2903,12 @@ func (t *Transport) logf(format string, args ...interface{}) {
log.Printf(format, args...)
}
-var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
+var noBody io.ReadCloser = noBodyReader{}
+
+type noBodyReader struct{}
+
+func (noBodyReader) Close() error { return nil }
+func (noBodyReader) Read([]byte) (int, error) { return 0, io.EOF }
type missingBody struct{}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
index 2618b2c11..0a242c669 100644
--- a/vendor/golang.org/x/net/http2/writesched_priority.go
+++ b/vendor/golang.org/x/net/http2/writesched_priority.go
@@ -383,16 +383,15 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
var n *priorityNode
- if id := wr.StreamID(); id == 0 {
+ if wr.isControl() {
n = &ws.root
} else {
+ id := wr.StreamID()
n = ws.nodes[id]
if n == nil {
// id is an idle or closed stream. wr should not be a HEADERS or
- // DATA frame. However, wr can be a RST_STREAM. In this case, we
- // push wr onto the root, rather than creating a new priorityNode,
- // since RST_STREAM is tiny and the stream's priority is unknown
- // anyway. See issue #17919.
+ // DATA frame. In that case, we push wr onto the root, rather
+ // than creating a new priorityNode.
if wr.DataSize() > 0 {
panic("add DATA on non-open stream")
}
diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go
index 7a8cf889b..9c070a44b 100644
--- a/vendor/golang.org/x/net/idna/trieval.go
+++ b/vendor/golang.org/x/net/idna/trieval.go
@@ -17,23 +17,23 @@ package idna
//
// The per-rune values have the following format:
//
-// if mapped {
-// if inlinedXOR {
-// 15..13 inline XOR marker
-// 12..11 unused
-// 10..3 inline XOR mask
-// } else {
-// 15..3 index into xor or mapping table
-// }
-// } else {
-// 15..14 unused
-// 13 mayNeedNorm
-// 12..11 attributes
-// 10..8 joining type
-// 7..3 category type
-// }
-// 2 use xor pattern
-// 1..0 mapped category
+// if mapped {
+// if inlinedXOR {
+// 15..13 inline XOR marker
+// 12..11 unused
+// 10..3 inline XOR mask
+// } else {
+// 15..3 index into xor or mapping table
+// }
+// } else {
+// 15..14 unused
+// 13 mayNeedNorm
+// 12..11 attributes
+// 10..8 joining type
+// 7..3 category type
+// }
+// 2 use xor pattern
+// 1..0 mapped category
//
// See the definitions below for a more detailed description of the various
// bits.
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index 9857fe53d..4c0850a45 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -8,22 +8,35 @@ package errgroup
import (
"context"
+ "fmt"
"sync"
)
+type token struct{}
+
// A Group is a collection of goroutines working on subtasks that are part of
// the same overall task.
//
-// A zero Group is valid and does not cancel on error.
+// A zero Group is valid, has no limit on the number of active goroutines,
+// and does not cancel on error.
type Group struct {
cancel func()
wg sync.WaitGroup
+ sem chan token
+
errOnce sync.Once
err error
}
+func (g *Group) done() {
+ if g.sem != nil {
+ <-g.sem
+ }
+ g.wg.Done()
+}
+
// WithContext returns a new Group and an associated Context derived from ctx.
//
// The derived Context is canceled the first time a function passed to Go
@@ -45,14 +58,48 @@ func (g *Group) Wait() error {
}
// Go calls the given function in a new goroutine.
+// It blocks until the new goroutine can be added without the number of
+// active goroutines in the group exceeding the configured limit.
//
// The first call to return a non-nil error cancels the group; its error will be
// returned by Wait.
func (g *Group) Go(f func() error) {
+ if g.sem != nil {
+ g.sem <- token{}
+ }
+
g.wg.Add(1)
+ go func() {
+ defer g.done()
+
+ if err := f(); err != nil {
+ g.errOnce.Do(func() {
+ g.err = err
+ if g.cancel != nil {
+ g.cancel()
+ }
+ })
+ }
+ }()
+}
+
+// TryGo calls the given function in a new goroutine only if the number of
+// active goroutines in the group is currently below the configured limit.
+//
+// The return value reports whether the goroutine was started.
+func (g *Group) TryGo(f func() error) bool {
+ if g.sem != nil {
+ select {
+ case g.sem <- token{}:
+ // Note: this allows barging iff channels in general allow barging.
+ default:
+ return false
+ }
+ }
+ g.wg.Add(1)
go func() {
- defer g.wg.Done()
+ defer g.done()
if err := f(); err != nil {
g.errOnce.Do(func() {
@@ -63,4 +110,23 @@ func (g *Group) Go(f func() error) {
})
}
}()
+ return true
+}
+
+// SetLimit limits the number of active goroutines in this group to at most n.
+// A negative value indicates no limit.
+//
+// Any subsequent call to the Go method will block until it can add an active
+// goroutine without exceeding the configured limit.
+//
+// The limit must not be modified while any goroutines in the group are active.
+func (g *Group) SetLimit(n int) {
+ if n < 0 {
+ g.sem = nil
+ return
+ }
+ if len(g.sem) != 0 {
+ panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem)))
+ }
+ g.sem = make(chan token, n)
}
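Putting SetLimit, Go, and TryGo together: at most three of the ten tasks below run at once, and Wait returns the first error. A usage sketch (process is an illustrative worker):

    // imports assumed: context, log, golang.org/x/sync/errgroup
    g, ctx := errgroup.WithContext(context.Background())
    g.SetLimit(3) // set before any Go or TryGo call
    for i := 0; i < 10; i++ {
        i := i
        g.Go(func() error { // blocks while three tasks are already running
            return process(ctx, i)
        })
    }
    if !g.TryGo(func() error { return nil }) {
        log.Print("group saturated; extra task not started")
    }
    if err := g.Wait(); err != nil {
        log.Fatal(err)
    }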
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
index e363c7d13..a4605e6d1 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
@@ -7,6 +7,7 @@
#include <cpuid.h>
#include <stdint.h>
+#include <x86intrin.h>
// Need to wrap __get_cpuid_count because it's declared as static.
int
@@ -17,27 +18,21 @@ gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
}
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
+#pragma GCC push_options
+#pragma GCC target("xsave")
+#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function)
+
// xgetbv reads the contents of an XCR (Extended Control Register)
// specified in the ECX register into registers EDX:EAX.
// Currently, the only supported value for XCR is 0.
-//
-// TODO: Replace with a better alternative:
-//
-// #include <xsaveintrin.h>
-//
-// #pragma GCC target("xsave")
-//
-// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) {
-// unsigned long long x = _xgetbv(0);
-// *eax = x & 0xffffffff;
-// *edx = (x >> 32) & 0xffffffff;
-// }
-//
-// Note that _xgetbv is defined starting with GCC 8.
void
gccgoXgetbv(uint32_t *eax, uint32_t *edx)
{
- __asm(" xorl %%ecx, %%ecx\n"
- " xgetbv"
- : "=a"(*eax), "=d"(*edx));
+ uint64_t v = _xgetbv(0);
+ *eax = v & 0xffffffff;
+ *edx = v >> 32;
}
+
+#pragma clang attribute pop
+#pragma GCC pop_options
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
index 6abd48eef..565357288 100644
--- a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
+++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
@@ -30,7 +30,7 @@ TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
MOVV trap+0(FP), R11 // syscall entry
SYSCALL
MOVV R4, r1+32(FP)
- MOVV R5, r2+40(FP)
+ MOVV R0, r2+40(FP) // r2 is not used. Always set to 0
JAL runtime·exitsyscall(SB)
RET
@@ -50,5 +50,5 @@ TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVV trap+0(FP), R11 // syscall entry
SYSCALL
MOVV R4, r1+32(FP)
- MOVV R5, r2+40(FP)
+ MOVV R0, r2+40(FP) // r2 is not used. Always set to 0
RET
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go
index ad22c33db..ac579c60f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_aix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_aix.go
@@ -217,12 +217,12 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) {
return
}
-func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) {
+func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) {
// Recvmsg not implemented on AIX
return -1, -1, -1, ENOSYS
}
-func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) {
+func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) {
// SendmsgN not implemented on AIX
return -1, ENOSYS
}
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
index 9c87c5f07..c437fc5d7 100644
--- a/vendor/golang.org/x/sys/unix/syscall_bsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -325,27 +325,26 @@ func GetsockoptString(fd, level, opt int) (string, error) {
//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
-func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) {
+func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) {
var msg Msghdr
msg.Name = (*byte)(unsafe.Pointer(rsa))
msg.Namelen = uint32(SizeofSockaddrAny)
- var iov Iovec
- if len(p) > 0 {
- iov.Base = (*byte)(unsafe.Pointer(&p[0]))
- iov.SetLen(len(p))
- }
var dummy byte
if len(oob) > 0 {
// receive at least one normal byte
- if len(p) == 0 {
- iov.Base = &dummy
- iov.SetLen(1)
+ if emptyIovecs(iov) {
+ var iova [1]Iovec
+ iova[0].Base = &dummy
+ iova[0].SetLen(1)
+ iov = iova[:]
}
msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
msg.SetControllen(len(oob))
}
- msg.Iov = &iov
- msg.Iovlen = 1
+ if len(iov) > 0 {
+ msg.Iov = &iov[0]
+ msg.SetIovlen(len(iov))
+ }
if n, err = recvmsg(fd, &msg, flags); err != nil {
return
}
@@ -356,31 +355,32 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn
//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
-func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) {
+func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) {
var msg Msghdr
msg.Name = (*byte)(unsafe.Pointer(ptr))
msg.Namelen = uint32(salen)
- var iov Iovec
- if len(p) > 0 {
- iov.Base = (*byte)(unsafe.Pointer(&p[0]))
- iov.SetLen(len(p))
- }
var dummy byte
+ var empty bool
if len(oob) > 0 {
// send at least one normal byte
- if len(p) == 0 {
- iov.Base = &dummy
- iov.SetLen(1)
+ empty = emptyIovecs(iov)
+ if empty {
+ var iova [1]Iovec
+ iova[0].Base = &dummy
+ iova[0].SetLen(1)
+ iov = iova[:]
}
msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
msg.SetControllen(len(oob))
}
- msg.Iov = &iov
- msg.Iovlen = 1
+ if len(iov) > 0 {
+ msg.Iov = &iov[0]
+ msg.SetIovlen(len(iov))
+ }
if n, err = sendmsg(fd, &msg, flags); err != nil {
return 0, err
}
- if len(oob) > 0 && len(p) == 0 {
+ if len(oob) > 0 && empty {
n = 0
}
return n, nil
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index e5448cc93..4f87f16ea 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -393,6 +393,13 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) {
return x, err
}
+func GetsockoptTCPConnectionInfo(fd, level, opt int) (*TCPConnectionInfo, error) {
+ var value TCPConnectionInfo
+ vallen := _Socklen(SizeofTCPConnectionInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
func SysctlKinfoProc(name string, args ...int) (*KinfoProc, error) {
mib, err := sysctlmib(name, args...)
if err != nil {
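A hedged, darwin-only sketch of reading the new connection statistics for an established *net.TCPConn, assuming the TCP_CONNECTION_INFO socket option constant from this package:

    // imports assumed: fmt, log, golang.org/x/sys/unix
    raw, err := tcpConn.SyscallConn()
    if err != nil {
        log.Fatal(err)
    }
    var info *unix.TCPConnectionInfo
    cerr := raw.Control(func(fd uintptr) {
        info, err = unix.GetsockoptTCPConnectionInfo(int(fd), unix.IPPROTO_TCP, unix.TCP_CONNECTION_INFO)
    })
    if cerr != nil || err != nil {
        log.Fatal(cerr, err)
    }
    fmt.Println("srtt:", info.Srtt, "retransmitted bytes:", info.Txretransmitbytes)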
diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go
index 8d5f294c4..e48244a9c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_illumos.go
+++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go
@@ -20,10 +20,9 @@ func bytes2iovec(bs [][]byte) []Iovec {
for i, b := range bs {
iovecs[i].SetLen(len(b))
if len(b) > 0 {
- // somehow Iovec.Base on illumos is (*int8), not (*byte)
- iovecs[i].Base = (*int8)(unsafe.Pointer(&b[0]))
+ iovecs[i].Base = &b[0]
} else {
- iovecs[i].Base = (*int8)(unsafe.Pointer(&_zero))
+ iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero))
}
}
return iovecs
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index c8d203212..5e4a94f73 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1499,18 +1499,13 @@ func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error
//sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL
//sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL
-func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) {
+func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) {
var msg Msghdr
msg.Name = (*byte)(unsafe.Pointer(rsa))
msg.Namelen = uint32(SizeofSockaddrAny)
- var iov Iovec
- if len(p) > 0 {
- iov.Base = &p[0]
- iov.SetLen(len(p))
- }
var dummy byte
if len(oob) > 0 {
- if len(p) == 0 {
+ if emptyIovecs(iov) {
var sockType int
sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
if err != nil {
@@ -1518,15 +1513,19 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn
}
// receive at least one normal byte
if sockType != SOCK_DGRAM {
- iov.Base = &dummy
- iov.SetLen(1)
+ var iova [1]Iovec
+ iova[0].Base = &dummy
+ iova[0].SetLen(1)
+ iov = iova[:]
}
}
msg.Control = &oob[0]
msg.SetControllen(len(oob))
}
- msg.Iov = &iov
- msg.Iovlen = 1
+ if len(iov) > 0 {
+ msg.Iov = &iov[0]
+ msg.SetIovlen(len(iov))
+ }
if n, err = recvmsg(fd, &msg, flags); err != nil {
return
}
@@ -1535,18 +1534,15 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn
return
}
-func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) {
+func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) {
var msg Msghdr
msg.Name = (*byte)(ptr)
msg.Namelen = uint32(salen)
- var iov Iovec
- if len(p) > 0 {
- iov.Base = &p[0]
- iov.SetLen(len(p))
- }
var dummy byte
+ var empty bool
if len(oob) > 0 {
- if len(p) == 0 {
+ empty = emptyIovecs(iov)
+ if empty {
var sockType int
sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
if err != nil {
@@ -1554,19 +1550,22 @@ func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags i
}
// send at least one normal byte
if sockType != SOCK_DGRAM {
- iov.Base = &dummy
- iov.SetLen(1)
+ var iova [1]Iovec
+ iova[0].Base = &dummy
+ iova[0].SetLen(1)
+ iov = iova[:]
}
}
msg.Control = &oob[0]
msg.SetControllen(len(oob))
}
- msg.Iov = &iov
- msg.Iovlen = 1
+ if len(iov) > 0 {
+ msg.Iov = &iov[0]
+ msg.SetIovlen(len(iov))
+ }
if n, err = sendmsg(fd, &msg, flags); err != nil {
return 0, err
}
- if len(oob) > 0 && len(p) == 0 {
+ if len(oob) > 0 && empty {
n = 0
}
return n, nil
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
index 28ba7b8cb..0b69c3eff 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
@@ -12,8 +12,6 @@ import "unsafe"
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
//sys Fchown(fd int, uid int, gid int) (err error)
-//sys Fstat(fd int, stat *Stat_t) (err error)
-//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
//sys Fstatfs(fd int, buf *Statfs_t) (err error)
//sys Ftruncate(fd int, length int64) (err error)
//sysnb Getegid() (egid int)
@@ -43,6 +41,43 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+func timespecFromStatxTimestamp(x StatxTimestamp) Timespec {
+ return Timespec{
+ Sec: x.Sec,
+ Nsec: int64(x.Nsec),
+ }
+}
+
+func Fstatat(fd int, path string, stat *Stat_t, flags int) error {
+ var r Statx_t
+ // Do it the glibc way, add AT_NO_AUTOMOUNT.
+ if err := Statx(fd, path, AT_NO_AUTOMOUNT|flags, STATX_BASIC_STATS, &r); err != nil {
+ return err
+ }
+
+ stat.Dev = Mkdev(r.Dev_major, r.Dev_minor)
+ stat.Ino = r.Ino
+ stat.Mode = uint32(r.Mode)
+ stat.Nlink = r.Nlink
+ stat.Uid = r.Uid
+ stat.Gid = r.Gid
+ stat.Rdev = Mkdev(r.Rdev_major, r.Rdev_minor)
+ // Hope we don't have to process files large enough to overflow these
+ // size fields...
+ stat.Size = int64(r.Size)
+ stat.Blksize = int32(r.Blksize)
+ stat.Blocks = int64(r.Blocks)
+ stat.Atim = timespecFromStatxTimestamp(r.Atime)
+ stat.Mtim = timespecFromStatxTimestamp(r.Mtime)
+ stat.Ctim = timespecFromStatxTimestamp(r.Ctime)
+
+ return nil
+}
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+ return Fstatat(fd, "", stat, AT_EMPTY_PATH)
+}
+
func Stat(path string, stat *Stat_t) (err error) {
return Fstatat(AT_FDCWD, path, stat, 0)
}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index 8ff7adba0..925a748a3 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -22,6 +22,7 @@ import "unsafe"
//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Getuid() (uid int)
//sys Listen(s int, n int) (err error)
+//sys MemfdSecret(flags int) (fd int, err error)
//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 5c2003cec..b5ec457cd 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -451,26 +451,25 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) {
//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_recvmsg
-func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) {
+func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) {
var msg Msghdr
msg.Name = (*byte)(unsafe.Pointer(rsa))
msg.Namelen = uint32(SizeofSockaddrAny)
- var iov Iovec
- if len(p) > 0 {
- iov.Base = (*int8)(unsafe.Pointer(&p[0]))
- iov.SetLen(len(p))
- }
- var dummy int8
+ var dummy byte
if len(oob) > 0 {
// receive at least one normal byte
- if len(p) == 0 {
- iov.Base = &dummy
- iov.SetLen(1)
+ if emptyIovecs(iov) {
+ var iova [1]Iovec
+ iova[0].Base = &dummy
+ iova[0].SetLen(1)
+ iov = iova[:]
}
msg.Accrightslen = int32(len(oob))
}
- msg.Iov = &iov
- msg.Iovlen = 1
+ if len(iov) > 0 {
+ msg.Iov = &iov[0]
+ msg.SetIovlen(len(iov))
+ }
if n, err = recvmsg(fd, &msg, flags); n == -1 {
return
}
@@ -480,30 +479,31 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn
//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_sendmsg
-func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) {
+func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) {
var msg Msghdr
msg.Name = (*byte)(unsafe.Pointer(ptr))
msg.Namelen = uint32(salen)
- var iov Iovec
- if len(p) > 0 {
- iov.Base = (*int8)(unsafe.Pointer(&p[0]))
- iov.SetLen(len(p))
- }
- var dummy int8
+ var dummy byte
+ var empty bool
if len(oob) > 0 {
// send at least one normal byte
- if len(p) == 0 {
- iov.Base = &dummy
- iov.SetLen(1)
+ empty = emptyIovecs(iov)
+ if empty {
+ var iova [1]Iovec
+ iova[0].Base = &dummy
+ iova[0].SetLen(1)
+ iov = iova[:]
}
msg.Accrightslen = int32(len(oob))
}
- msg.Iov = &iov
- msg.Iovlen = 1
+ if len(iov) > 0 {
+ msg.Iov = &iov[0]
+ msg.SetIovlen(len(iov))
+ }
if n, err = sendmsg(fd, &msg, flags); err != nil {
return 0, err
}
- if len(oob) > 0 && len(p) == 0 {
+ if len(oob) > 0 && empty {
n = 0
}
return n, nil
@@ -618,6 +618,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Getpriority(which int, who int) (n int, err error)
//sysnb Getrlimit(which int, lim *Rlimit) (err error)
//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
//sysnb Gettimeofday(tv *Timeval) (err error)
//sysnb Getuid() (uid int)
//sys Kill(pid int, signum syscall.Signal) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
index 70508afc1..1ff5060b5 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -338,8 +338,13 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) {
}
func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
+ var iov [1]Iovec
+ if len(p) > 0 {
+ iov[0].Base = &p[0]
+ iov[0].SetLen(len(p))
+ }
var rsa RawSockaddrAny
- n, oobn, recvflags, err = recvmsgRaw(fd, p, oob, flags, &rsa)
+ n, oobn, recvflags, err = recvmsgRaw(fd, iov[:], oob, flags, &rsa)
// source address is only specified if the socket is unconnected
if rsa.Addr.Family != AF_UNSPEC {
from, err = anyToSockaddr(fd, &rsa)
@@ -347,12 +352,67 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
return
}
+// RecvmsgBuffers receives a message from a socket using the recvmsg
+// system call. The flags are passed to recvmsg. Any non-control data
+// read is scattered into the buffers slices. The results are:
+// - n is the number of non-control data read into bufs
+// - oobn is the number of control data read into oob; this may be interpreted using [ParseSocketControlMessage]
+// - recvflags is flags returned by recvmsg
+// - from is the address of the sender
+func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
+ iov := make([]Iovec, len(buffers))
+ for i := range buffers {
+ if len(buffers[i]) > 0 {
+ iov[i].Base = &buffers[i][0]
+ iov[i].SetLen(len(buffers[i]))
+ } else {
+ iov[i].Base = (*byte)(unsafe.Pointer(&_zero))
+ }
+ }
+ var rsa RawSockaddrAny
+ n, oobn, recvflags, err = recvmsgRaw(fd, iov, oob, flags, &rsa)
+ if err == nil && rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(fd, &rsa)
+ }
+ return
+}
+
func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
_, err = SendmsgN(fd, p, oob, to, flags)
return
}
func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
+ var iov [1]Iovec
+ if len(p) > 0 {
+ iov[0].Base = &p[0]
+ iov[0].SetLen(len(p))
+ }
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ }
+ return sendmsgN(fd, iov[:], oob, ptr, salen, flags)
+}
+
+// SendmsgBuffers sends a message on a socket to an address using the sendmsg
+// system call. The flags are passed to sendmsg. Any non-control data written
+// is gathered from buffers. The function returns the number of bytes written
+// to the socket.
+func SendmsgBuffers(fd int, buffers [][]byte, oob []byte, to Sockaddr, flags int) (n int, err error) {
+ iov := make([]Iovec, len(buffers))
+ for i := range buffers {
+ if len(buffers[i]) > 0 {
+ iov[i].Base = &buffers[i][0]
+ iov[i].SetLen(len(buffers[i]))
+ } else {
+ iov[i].Base = (*byte)(unsafe.Pointer(&_zero))
+ }
+ }
var ptr unsafe.Pointer
var salen _Socklen
if to != nil {
@@ -361,7 +421,7 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error)
return 0, err
}
}
- return sendmsgN(fd, p, oob, ptr, salen, flags)
+ return sendmsgN(fd, iov, oob, ptr, salen, flags)
}
func Send(s int, buf []byte, flags int) (err error) {
@@ -484,3 +544,13 @@ func Lutimes(path string, tv []Timeval) error {
}
return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW)
}
+
+// emptyIovecs reports whether there are no bytes in the slice of Iovec.
+func emptyIovecs(iov []Iovec) bool {
+ for i := range iov {
+ if iov[i].Len > 0 {
+ return false
+ }
+ }
+ return true
+}
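A usage sketch for the two new vectored entry points over a Unix socketpair: the header and payload buffers are gathered on send and scattered on receive without an intermediate copy:

    // imports assumed: log, golang.org/x/sys/unix
    fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
    if err != nil {
        log.Fatal(err)
    }
    hdr, payload := []byte("hdr:"), []byte("data")
    if _, err := unix.SendmsgBuffers(fds[0], [][]byte{hdr, payload}, nil, nil, 0); err != nil {
        log.Fatal(err)
    }
    rh, rp := make([]byte, 4), make([]byte, 4)
    n, _, _, _, err := unix.RecvmsgBuffers(fds[1], [][]byte{rh, rp}, nil, 0)
    if err != nil {
        log.Fatal(err)
    }
    // n == 8; rh holds "hdr:" and rp holds "data".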
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index c0a43f8ba..dfa9bd938 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -184,6 +184,7 @@ const (
BPF_F_ALLOW_MULTI = 0x2
BPF_F_ALLOW_OVERRIDE = 0x1
BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_KPROBE_MULTI_RETURN = 0x1
BPF_F_QUERY_EFFECTIVE = 0x1
BPF_F_REPLACE = 0x4
BPF_F_SLEEPABLE = 0x10
@@ -191,6 +192,8 @@ const (
BPF_F_TEST_RND_HI32 = 0x4
BPF_F_TEST_RUN_ON_CPU = 0x1
BPF_F_TEST_STATE_FREQ = 0x8
+ BPF_F_TEST_XDP_LIVE_FRAMES = 0x2
+ BPF_F_XDP_HAS_FRAGS = 0x20
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -517,9 +520,9 @@ const (
DM_UUID_FLAG = 0x4000
DM_UUID_LEN = 0x81
DM_VERSION = 0xc138fd00
- DM_VERSION_EXTRA = "-ioctl (2021-03-22)"
+ DM_VERSION_EXTRA = "-ioctl (2022-02-22)"
DM_VERSION_MAJOR = 0x4
- DM_VERSION_MINOR = 0x2d
+ DM_VERSION_MINOR = 0x2e
DM_VERSION_PATCHLEVEL = 0x0
DT_BLK = 0x6
DT_CHR = 0x2
@@ -712,6 +715,7 @@ const (
ETH_P_EDSA = 0xdada
ETH_P_ERSPAN = 0x88be
ETH_P_ERSPAN2 = 0x22eb
+ ETH_P_ETHERCAT = 0x88a4
ETH_P_FCOE = 0x8906
ETH_P_FIP = 0x8914
ETH_P_HDLC = 0x19
@@ -749,6 +753,7 @@ const (
ETH_P_PPP_MP = 0x8
ETH_P_PPP_SES = 0x8864
ETH_P_PREAUTH = 0x88c7
+ ETH_P_PROFINET = 0x8892
ETH_P_PRP = 0x88fb
ETH_P_PUP = 0x200
ETH_P_PUPAT = 0x201
@@ -837,6 +842,7 @@ const (
FAN_FS_ERROR = 0x8000
FAN_MARK_ADD = 0x1
FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_EVICTABLE = 0x200
FAN_MARK_FILESYSTEM = 0x100
FAN_MARK_FLUSH = 0x80
FAN_MARK_IGNORED_MASK = 0x20
@@ -1055,7 +1061,7 @@ const (
IFA_F_STABLE_PRIVACY = 0x800
IFA_F_TEMPORARY = 0x1
IFA_F_TENTATIVE = 0x40
- IFA_MAX = 0xa
+ IFA_MAX = 0xb
IFF_ALLMULTI = 0x200
IFF_ATTACH_QUEUE = 0x200
IFF_AUTOMEDIA = 0x4000
@@ -1403,6 +1409,7 @@ const (
LANDLOCK_ACCESS_FS_MAKE_SYM = 0x1000
LANDLOCK_ACCESS_FS_READ_DIR = 0x8
LANDLOCK_ACCESS_FS_READ_FILE = 0x4
+ LANDLOCK_ACCESS_FS_REFER = 0x2000
LANDLOCK_ACCESS_FS_REMOVE_DIR = 0x10
LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20
LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2
@@ -1758,6 +1765,7 @@ const (
NLM_F_ACK_TLVS = 0x200
NLM_F_APPEND = 0x800
NLM_F_ATOMIC = 0x400
+ NLM_F_BULK = 0x200
NLM_F_CAPPED = 0x100
NLM_F_CREATE = 0x400
NLM_F_DUMP = 0x300
@@ -2075,6 +2083,11 @@ const (
PR_SET_UNALIGN = 0x6
PR_SET_VMA = 0x53564d41
PR_SET_VMA_ANON_NAME = 0x0
+ PR_SME_GET_VL = 0x40
+ PR_SME_SET_VL = 0x3f
+ PR_SME_SET_VL_ONEXEC = 0x40000
+ PR_SME_VL_INHERIT = 0x20000
+ PR_SME_VL_LEN_MASK = 0xffff
PR_SPEC_DISABLE = 0x4
PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
@@ -2227,8 +2240,9 @@ const (
RTC_FEATURE_ALARM = 0x0
RTC_FEATURE_ALARM_RES_2S = 0x3
RTC_FEATURE_ALARM_RES_MINUTE = 0x1
+ RTC_FEATURE_ALARM_WAKEUP_ONLY = 0x7
RTC_FEATURE_BACKUP_SWITCH_MODE = 0x6
- RTC_FEATURE_CNT = 0x7
+ RTC_FEATURE_CNT = 0x8
RTC_FEATURE_CORRECTION = 0x5
RTC_FEATURE_NEED_WEEK_DAY = 0x2
RTC_FEATURE_UPDATE_INTERRUPT = 0x4
@@ -2302,6 +2316,7 @@ const (
RTM_DELRULE = 0x21
RTM_DELTCLASS = 0x29
RTM_DELTFILTER = 0x2d
+ RTM_DELTUNNEL = 0x79
RTM_DELVLAN = 0x71
RTM_F_CLONED = 0x200
RTM_F_EQUALIZE = 0x400
@@ -2334,8 +2349,9 @@ const (
RTM_GETSTATS = 0x5e
RTM_GETTCLASS = 0x2a
RTM_GETTFILTER = 0x2e
+ RTM_GETTUNNEL = 0x7a
RTM_GETVLAN = 0x72
- RTM_MAX = 0x77
+ RTM_MAX = 0x7b
RTM_NEWACTION = 0x30
RTM_NEWADDR = 0x14
RTM_NEWADDRLABEL = 0x48
@@ -2359,11 +2375,13 @@ const (
RTM_NEWSTATS = 0x5c
RTM_NEWTCLASS = 0x28
RTM_NEWTFILTER = 0x2c
- RTM_NR_FAMILIES = 0x1a
- RTM_NR_MSGTYPES = 0x68
+ RTM_NEWTUNNEL = 0x78
+ RTM_NR_FAMILIES = 0x1b
+ RTM_NR_MSGTYPES = 0x6c
RTM_SETDCB = 0x4f
RTM_SETLINK = 0x13
RTM_SETNEIGHTBL = 0x43
+ RTM_SETSTATS = 0x5f
RTNH_ALIGNTO = 0x4
RTNH_COMPARE_MASK = 0x59
RTNH_F_DEAD = 0x1
@@ -2544,6 +2562,9 @@ const (
SOCK_RDM = 0x4
SOCK_SEQPACKET = 0x5
SOCK_SNDBUF_LOCK = 0x1
+ SOCK_TXREHASH_DEFAULT = 0xff
+ SOCK_TXREHASH_DISABLED = 0x0
+ SOCK_TXREHASH_ENABLED = 0x1
SOL_AAL = 0x109
SOL_ALG = 0x117
SOL_ATM = 0x108
@@ -2559,6 +2580,8 @@ const (
SOL_IUCV = 0x115
SOL_KCM = 0x119
SOL_LLC = 0x10c
+ SOL_MCTP = 0x11d
+ SOL_MPTCP = 0x11c
SOL_NETBEUI = 0x10b
SOL_NETLINK = 0x10e
SOL_NFC = 0x118
@@ -2674,7 +2697,7 @@ const (
TASKSTATS_GENL_NAME = "TASKSTATS"
TASKSTATS_GENL_VERSION = 0x1
TASKSTATS_TYPE_MAX = 0x6
- TASKSTATS_VERSION = 0xb
+ TASKSTATS_VERSION = 0xd
TCIFLUSH = 0x0
TCIOFF = 0x2
TCIOFLUSH = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 1b305fab1..274e2dabd 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -326,6 +326,7 @@ const (
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
@@ -350,6 +351,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 6bcdef5dd..95b6eeedf 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -327,6 +327,7 @@ const (
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
@@ -351,6 +352,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index e65df0f8d..918cd130e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -333,6 +333,7 @@ const (
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
@@ -357,6 +358,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index c7021115a..3907dc5a9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -323,6 +323,7 @@ const (
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
@@ -347,6 +348,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_WIFI_STATUS = 0x29
@@ -511,6 +513,7 @@ const (
WORDSIZE = 0x40
XCASE = 0x4
XTABS = 0x1800
+ ZA_MAGIC = 0x54366345
_HIDIOCGRAWNAME = 0x80804804
_HIDIOCGRAWPHYS = 0x80404805
_HIDIOCGRAWUNIQ = 0x80404808
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 0d83a1cd4..03d5c105a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -109,8 +109,6 @@ const (
IUCLC = 0x200
IXOFF = 0x1000
IXON = 0x400
- LASX_CTX_MAGIC = 0x41535801
- LSX_CTX_MAGIC = 0x53580001
MAP_ANON = 0x20
MAP_ANONYMOUS = 0x20
MAP_DENYWRITE = 0x800
@@ -319,6 +317,7 @@ const (
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
@@ -343,6 +342,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 7f44a495b..bd794e010 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -326,6 +326,7 @@ const (
SO_RCVBUF = 0x1002
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
@@ -351,6 +352,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x1008
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 2f92b4e48..6c741b054 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -326,6 +326,7 @@ const (
SO_RCVBUF = 0x1002
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
@@ -351,6 +352,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x1008
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index f5367a966..807b8cd2a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -326,6 +326,7 @@ const (
SO_RCVBUF = 0x1002
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
@@ -351,6 +352,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x1008
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 2e22337d7..a39e4f5c2 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -326,6 +326,7 @@ const (
SO_RCVBUF = 0x1002
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
@@ -351,6 +352,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x1008
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 858c4f30f..c0fcda86b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -381,6 +381,7 @@ const (
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
@@ -405,6 +406,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index af2a7ba6e..f3b72407a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -385,6 +385,7 @@ const (
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
@@ -409,6 +410,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index eaa2eb8e2..72f2a45d5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -385,6 +385,7 @@ const (
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
@@ -409,6 +410,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index faaa9f063..45b214b4d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -314,6 +314,7 @@ const (
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
@@ -338,6 +339,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 0d161f0b7..1897f207b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -389,6 +389,7 @@ const (
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
@@ -413,6 +414,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 4fd497a3e..1fb7a3953 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -380,6 +380,7 @@ const (
SO_RCVBUF = 0x1002
SO_RCVBUFFORCE = 0x100b
SO_RCVLOWAT = 0x800
+ SO_RCVMARK = 0x54
SO_RCVTIMEO = 0x2000
SO_RCVTIMEO_NEW = 0x44
SO_RCVTIMEO_OLD = 0x2000
@@ -404,6 +405,7 @@ const (
SO_TIMESTAMPNS_NEW = 0x42
SO_TIMESTAMPNS_OLD = 0x21
SO_TIMESTAMP_NEW = 0x46
+ SO_TXREHASH = 0x53
SO_TXTIME = 0x3f
SO_TYPE = 0x1008
SO_WIFI_STATUS = 0x25
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
index 8cdfbe71e..523f2ba03 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
@@ -83,31 +83,6 @@ func Fchown(fd int, uid int, gid int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Fstat(fd int, stat *Stat_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Fstatfs(fd int, buf *Statfs_t) (err error) {
_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
index a1a9bcbbd..1239cc2de 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
@@ -180,6 +180,17 @@ func Listen(s int, n int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdSecret(flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func pread(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index d12f4fbfe..fdf53f8da 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -66,6 +66,7 @@ import (
//go:cgo_import_dynamic libc_getpriority getpriority "libc.so"
//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so"
//go:cgo_import_dynamic libc_getrusage getrusage "libc.so"
+//go:cgo_import_dynamic libc_getsid getsid "libc.so"
//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so"
//go:cgo_import_dynamic libc_getuid getuid "libc.so"
//go:cgo_import_dynamic libc_kill kill "libc.so"
@@ -202,6 +203,7 @@ import (
//go:linkname procGetpriority libc_getpriority
//go:linkname procGetrlimit libc_getrlimit
//go:linkname procGetrusage libc_getrusage
+//go:linkname procGetsid libc_getsid
//go:linkname procGettimeofday libc_gettimeofday
//go:linkname procGetuid libc_getuid
//go:linkname procKill libc_kill
@@ -339,6 +341,7 @@ var (
procGetpriority,
procGetrlimit,
procGetrusage,
+ procGetsid,
procGettimeofday,
procGetuid,
procKill,
@@ -1044,6 +1047,17 @@ func Getrusage(who int, rusage *Rusage) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Gettimeofday(tv *Timeval) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index e443f9a32..44a764c99 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -85,8 +85,6 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
- SYS_FSTATAT = 79
- SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82
SYS_FDATASYNC = 83
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index c3a5af862..3a9c96b28 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -309,6 +309,7 @@ const (
SYS_LANDLOCK_CREATE_RULESET = 444
SYS_LANDLOCK_ADD_RULE = 445
SYS_LANDLOCK_RESTRICT_SELF = 446
+ SYS_MEMFD_SECRET = 447
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
SYS_SET_MEMPOLICY_HOME_NODE = 450
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
index 885842c0e..e2a64f099 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
@@ -366,30 +366,57 @@ type ICMPv6Filter struct {
Filt [8]uint32
}
+type TCPConnectionInfo struct {
+ State uint8
+ Snd_wscale uint8
+ Rcv_wscale uint8
+ _ uint8
+ Options uint32
+ Flags uint32
+ Rto uint32
+ Maxseg uint32
+ Snd_ssthresh uint32
+ Snd_cwnd uint32
+ Snd_wnd uint32
+ Snd_sbbytes uint32
+ Rcv_wnd uint32
+ Rttcur uint32
+ Srtt uint32
+ Rttvar uint32
+ Txpackets uint64
+ Txbytes uint64
+ Txretransmitbytes uint64
+ Rxpackets uint64
+ Rxbytes uint64
+ Rxoutoforderbytes uint64
+ Txretransmitpackets uint64
+}
+
const (
- SizeofSockaddrInet4 = 0x10
- SizeofSockaddrInet6 = 0x1c
- SizeofSockaddrAny = 0x6c
- SizeofSockaddrUnix = 0x6a
- SizeofSockaddrDatalink = 0x14
- SizeofSockaddrCtl = 0x20
- SizeofSockaddrVM = 0xc
- SizeofXvsockpcb = 0xa8
- SizeofXSocket = 0x64
- SizeofXSockbuf = 0x18
- SizeofXVSockPgen = 0x20
- SizeofXucred = 0x4c
- SizeofLinger = 0x8
- SizeofIovec = 0x10
- SizeofIPMreq = 0x8
- SizeofIPMreqn = 0xc
- SizeofIPv6Mreq = 0x14
- SizeofMsghdr = 0x30
- SizeofCmsghdr = 0xc
- SizeofInet4Pktinfo = 0xc
- SizeofInet6Pktinfo = 0x14
- SizeofIPv6MTUInfo = 0x20
- SizeofICMPv6Filter = 0x20
+ SizeofSockaddrInet4 = 0x10
+ SizeofSockaddrInet6 = 0x1c
+ SizeofSockaddrAny = 0x6c
+ SizeofSockaddrUnix = 0x6a
+ SizeofSockaddrDatalink = 0x14
+ SizeofSockaddrCtl = 0x20
+ SizeofSockaddrVM = 0xc
+ SizeofXvsockpcb = 0xa8
+ SizeofXSocket = 0x64
+ SizeofXSockbuf = 0x18
+ SizeofXVSockPgen = 0x20
+ SizeofXucred = 0x4c
+ SizeofLinger = 0x8
+ SizeofIovec = 0x10
+ SizeofIPMreq = 0x8
+ SizeofIPMreqn = 0xc
+ SizeofIPv6Mreq = 0x14
+ SizeofMsghdr = 0x30
+ SizeofCmsghdr = 0xc
+ SizeofInet4Pktinfo = 0xc
+ SizeofInet6Pktinfo = 0x14
+ SizeofIPv6MTUInfo = 0x20
+ SizeofICMPv6Filter = 0x20
+ SizeofTCPConnectionInfo = 0x70
)
const (
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
index b23c02337..34aa77521 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
@@ -366,30 +366,57 @@ type ICMPv6Filter struct {
Filt [8]uint32
}
+type TCPConnectionInfo struct {
+ State uint8
+ Snd_wscale uint8
+ Rcv_wscale uint8
+ _ uint8
+ Options uint32
+ Flags uint32
+ Rto uint32
+ Maxseg uint32
+ Snd_ssthresh uint32
+ Snd_cwnd uint32
+ Snd_wnd uint32
+ Snd_sbbytes uint32
+ Rcv_wnd uint32
+ Rttcur uint32
+ Srtt uint32
+ Rttvar uint32
+ Txpackets uint64
+ Txbytes uint64
+ Txretransmitbytes uint64
+ Rxpackets uint64
+ Rxbytes uint64
+ Rxoutoforderbytes uint64
+ Txretransmitpackets uint64
+}
+
const (
- SizeofSockaddrInet4 = 0x10
- SizeofSockaddrInet6 = 0x1c
- SizeofSockaddrAny = 0x6c
- SizeofSockaddrUnix = 0x6a
- SizeofSockaddrDatalink = 0x14
- SizeofSockaddrCtl = 0x20
- SizeofSockaddrVM = 0xc
- SizeofXvsockpcb = 0xa8
- SizeofXSocket = 0x64
- SizeofXSockbuf = 0x18
- SizeofXVSockPgen = 0x20
- SizeofXucred = 0x4c
- SizeofLinger = 0x8
- SizeofIovec = 0x10
- SizeofIPMreq = 0x8
- SizeofIPMreqn = 0xc
- SizeofIPv6Mreq = 0x14
- SizeofMsghdr = 0x30
- SizeofCmsghdr = 0xc
- SizeofInet4Pktinfo = 0xc
- SizeofInet6Pktinfo = 0x14
- SizeofIPv6MTUInfo = 0x20
- SizeofICMPv6Filter = 0x20
+ SizeofSockaddrInet4 = 0x10
+ SizeofSockaddrInet6 = 0x1c
+ SizeofSockaddrAny = 0x6c
+ SizeofSockaddrUnix = 0x6a
+ SizeofSockaddrDatalink = 0x14
+ SizeofSockaddrCtl = 0x20
+ SizeofSockaddrVM = 0xc
+ SizeofXvsockpcb = 0xa8
+ SizeofXSocket = 0x64
+ SizeofXSockbuf = 0x18
+ SizeofXVSockPgen = 0x20
+ SizeofXucred = 0x4c
+ SizeofLinger = 0x8
+ SizeofIovec = 0x10
+ SizeofIPMreq = 0x8
+ SizeofIPMreqn = 0xc
+ SizeofIPv6Mreq = 0x14
+ SizeofMsghdr = 0x30
+ SizeofCmsghdr = 0xc
+ SizeofInet4Pktinfo = 0xc
+ SizeofInet6Pktinfo = 0x14
+ SizeofIPv6MTUInfo = 0x20
+ SizeofICMPv6Filter = 0x20
+ SizeofTCPConnectionInfo = 0x70
)
const (
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 9962d26bb..e62611e53 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -1127,7 +1127,9 @@ const (
PERF_BR_SYSRET = 0x8
PERF_BR_COND_CALL = 0x9
PERF_BR_COND_RET = 0xa
- PERF_BR_MAX = 0xb
+ PERF_BR_ERET = 0xb
+ PERF_BR_IRQ = 0xc
+ PERF_BR_MAX = 0xd
PERF_SAMPLE_REGS_ABI_NONE = 0x0
PERF_SAMPLE_REGS_ABI_32 = 0x1
PERF_SAMPLE_REGS_ABI_64 = 0x2
@@ -2969,7 +2971,7 @@ const (
DEVLINK_CMD_TRAP_POLICER_NEW = 0x47
DEVLINK_CMD_TRAP_POLICER_DEL = 0x48
DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49
- DEVLINK_CMD_MAX = 0x4d
+ DEVLINK_CMD_MAX = 0x51
DEVLINK_PORT_TYPE_NOTSET = 0x0
DEVLINK_PORT_TYPE_AUTO = 0x1
DEVLINK_PORT_TYPE_ETH = 0x2
@@ -3198,7 +3200,7 @@ const (
DEVLINK_ATTR_RATE_NODE_NAME = 0xa8
DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 0xa9
DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 0xaa
- DEVLINK_ATTR_MAX = 0xaa
+ DEVLINK_ATTR_MAX = 0xae
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1
DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0
@@ -3638,7 +3640,11 @@ const (
ETHTOOL_A_RINGS_RX_MINI = 0x7
ETHTOOL_A_RINGS_RX_JUMBO = 0x8
ETHTOOL_A_RINGS_TX = 0x9
- ETHTOOL_A_RINGS_MAX = 0xa
+ ETHTOOL_A_RINGS_RX_BUF_LEN = 0xa
+ ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb
+ ETHTOOL_A_RINGS_CQE_SIZE = 0xc
+ ETHTOOL_A_RINGS_TX_PUSH = 0xd
+ ETHTOOL_A_RINGS_MAX = 0xd
ETHTOOL_A_CHANNELS_UNSPEC = 0x0
ETHTOOL_A_CHANNELS_HEADER = 0x1
ETHTOOL_A_CHANNELS_RX_MAX = 0x2
@@ -4323,7 +4329,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
- NL80211_ATTR_MAX = 0x135
+ NL80211_ATTR_MAX = 0x137
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
@@ -4549,7 +4555,7 @@ const (
NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY = 0x3
NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE = 0x5
NL80211_BAND_IFTYPE_ATTR_IFTYPES = 0x1
- NL80211_BAND_IFTYPE_ATTR_MAX = 0x7
+ NL80211_BAND_IFTYPE_ATTR_MAX = 0xb
NL80211_BAND_S1GHZ = 0x4
NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE = 0x2
NL80211_BITRATE_ATTR_MAX = 0x2
@@ -4887,7 +4893,7 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
- NL80211_FREQUENCY_ATTR_MAX = 0x19
+ NL80211_FREQUENCY_ATTR_MAX = 0x1b
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
@@ -5254,7 +5260,7 @@ const (
NL80211_RATE_INFO_HE_RU_ALLOC_52 = 0x1
NL80211_RATE_INFO_HE_RU_ALLOC_996 = 0x5
NL80211_RATE_INFO_HE_RU_ALLOC = 0x11
- NL80211_RATE_INFO_MAX = 0x11
+ NL80211_RATE_INFO_MAX = 0x16
NL80211_RATE_INFO_MCS = 0x2
NL80211_RATE_INFO_SHORT_GI = 0x4
NL80211_RATE_INFO_VHT_MCS = 0x6
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index 4948362f2..7551af483 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -324,6 +324,13 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ _ [4]byte
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index f64345e0e..3e738ac0b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -338,6 +338,12 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 72469c79e..6183eef4a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -315,6 +315,13 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ _ [4]byte
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index 68f072283..968cecb17 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -317,6 +317,12 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
index 090ae46c6..8fe4c522a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -318,6 +318,12 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 03604cca1..11426a301 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -320,6 +320,13 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ _ [4]byte
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index fe57a7b26..ad1c3b3de 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -320,6 +320,12 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index 3f0db4da8..15fd84e4d 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -320,6 +320,12 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index 70ecd3b23..49c49825a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -320,6 +320,13 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ _ [4]byte
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
index 4e700120d..cd36d0da2 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
@@ -327,6 +327,13 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ _ [4]byte
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 34a57c699..8c6fce039 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -327,6 +327,12 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 6b84a4729..20910f2ad 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -327,6 +327,12 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index c4a305fe2..71b7b3331 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -345,6 +345,12 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index a1f1e4c9e..71184cc2c 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -340,6 +340,12 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index df95ebf3a..06156285d 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -322,6 +322,12 @@ type Taskstats struct {
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
+ Ac_tgid uint32
+ Ac_tgetime uint64
+ Ac_exe_dev uint64
+ Ac_exe_inode uint64
+ Wpcopy_count uint64
+ Wpcopy_delay_total uint64
}
type cpuMask uint64
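In each of these regenerated Taskstats blocks, the 32-bit targets (386, arm, mips, mipsle, ppc) add an explicit `_ [4]byte` pad before the new 64-bit fields while the 64-bit targets do not. The kernel keeps these `__u64` fields 8-byte aligned, but Go only guarantees 4-byte alignment for uint64 on those 32-bit architectures, so the pad is what keeps the Go layout in sync. A minimal sketch of the effect, using a local mock struct rather than the vendored unix.Taskstats:

```go
// Mock of the tail of the struct; not the real unix.Taskstats.
package main

import (
	"fmt"
	"unsafe"
)

type taskstatsTail struct {
	Ac_tgid    uint32
	_          [4]byte // on GOARCH=386/arm, omitting this would put Ac_tgetime at offset 4
	Ac_tgetime uint64
}

func main() {
	// Prints 8 on every GOARCH, matching the kernel layout.
	fmt.Println(unsafe.Offsetof(taskstatsTail{}.Ac_tgetime))
}
```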
diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
index ad4aad279..c1a9b83ad 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
@@ -178,7 +178,7 @@ type Linger struct {
}
type Iovec struct {
- Base *int8
+ Base *byte
Len uint64
}
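Changing `Iovec.Base` from `*int8` to `*byte` means callers can point an iovec at a Go byte slice with a plain address-of instead of an `unsafe.Pointer` conversion. A rough sketch with a local stand-in for the Solaris type:

```go
// iovec is a local stand-in for unix.Iovec on solaris/amd64.
package main

import "fmt"

type iovec struct {
	Base *byte
	Len  uint64
}

// fill points an iovec at buf. With Base *byte this is a direct address-of;
// the old *int8 field needed (*int8)(unsafe.Pointer(&buf[0])).
func fill(buf []byte) iovec {
	iov := iovec{Len: uint64(len(buf))}
	if len(buf) > 0 {
		iov.Base = &buf[0]
	}
	return iov
}

func main() {
	fmt.Println(fill([]byte("hello")).Len) // 5
}
```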
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index cd03f8c76..52338d004 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly.
- **All tests need to be passing** before your change can be merged. We
recommend you **run tests locally** before creating your PR to catch breakages
early on.
- - `make all` to test everything, OR
- - `make vet` to catch vet errors
- - `make test` to run the tests
- - `make testrace` to run tests in race mode
+ - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors
+ - `go test -cpu 1,4 -timeout 7m ./...` to run the tests
+ - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
- Exceptions to the rules can be made if there's a compelling reason for doing so.
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index bcc6f5451..f7a7697ca 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -27,6 +27,7 @@ import (
"net"
"strings"
+ "google.golang.org/grpc/channelz"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal"
@@ -192,7 +193,7 @@ type BuildOptions struct {
// server can ignore this field.
Authority string
// ChannelzParentID is the parent ClientConn's channelz ID.
- ChannelzParentID int64
+ ChannelzParentID *channelz.Identifier
// CustomUserAgent is the custom user agent set on the parent ClientConn.
// The balancer should set the same custom user agent if it creates a
// ClientConn.
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
index f4ea61746..b1c23eaae 100644
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -20,130 +20,178 @@ package grpc
import (
"fmt"
+ "strings"
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/buffer"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
)
-// scStateUpdate contains the subConn and the new state it changed to.
-type scStateUpdate struct {
- sc balancer.SubConn
- state connectivity.State
- err error
-}
+// ccBalancerWrapper sits between the ClientConn and the Balancer.
+//
+// ccBalancerWrapper implements methods corresponding to the ones on the
+// balancer.Balancer interface. The ClientConn is free to call these methods
+// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
+// to the Balancer happen synchronously and in order.
+//
+// ccBalancerWrapper also implements the balancer.ClientConn interface and is
+// passed to the Balancer implementations. It invokes unexported methods on the
+// ClientConn to handle these calls from the Balancer.
+//
+// It uses the gracefulswitch.Balancer internally to ensure that balancer
+// switches happen in a graceful manner.
+type ccBalancerWrapper struct {
+ cc *ClientConn
-// exitIdle contains no data and is just a signal sent on the updateCh in
-// ccBalancerWrapper to instruct the balancer to exit idle.
-type exitIdle struct{}
+ // Since these fields are accessed only from handleXxx() methods which are
+ // synchronized by the watcher goroutine, we do not need a mutex to protect
+ // these fields.
+ balancer *gracefulswitch.Balancer
+ curBalancerName string
-// ccBalancerWrapper is a wrapper on top of cc for balancers.
-// It implements balancer.ClientConn interface.
-type ccBalancerWrapper struct {
- cc *ClientConn
- balancerMu sync.Mutex // synchronizes calls to the balancer
- balancer balancer.Balancer
- hasExitIdle bool
- updateCh *buffer.Unbounded
- closed *grpcsync.Event
- done *grpcsync.Event
-
- mu sync.Mutex
- subConns map[*acBalancerWrapper]struct{}
+ updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher().
+ resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here.
+ closed *grpcsync.Event // Indicates if close has been called.
+ done *grpcsync.Event // Indicates if close has completed its work.
}
-func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
+// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
+// is not created until the switchTo() method is invoked.
+func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
ccb := &ccBalancerWrapper{
cc: cc,
updateCh: buffer.NewUnbounded(),
+ resultCh: buffer.NewUnbounded(),
closed: grpcsync.NewEvent(),
done: grpcsync.NewEvent(),
- subConns: make(map[*acBalancerWrapper]struct{}),
}
go ccb.watcher()
- ccb.balancer = b.Build(ccb, bopts)
- _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler)
+ ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
return ccb
}
-// watcher balancer functions sequentially, so the balancer can be implemented
-// lock-free.
+// The following xxxUpdate structs wrap the arguments received as part of the
+// corresponding update. The watcher goroutine uses the 'type' of the update to
+// invoke the appropriate handler routine to handle the update.
+
+type ccStateUpdate struct {
+ ccs *balancer.ClientConnState
+}
+
+type scStateUpdate struct {
+ sc balancer.SubConn
+ state connectivity.State
+ err error
+}
+
+type exitIdleUpdate struct{}
+
+type resolverErrorUpdate struct {
+ err error
+}
+
+type switchToUpdate struct {
+ name string
+}
+
+type subConnUpdate struct {
+ acbw *acBalancerWrapper
+}
+
+// watcher is a long-running goroutine which reads updates from a channel and
+// invokes corresponding methods on the underlying balancer. It ensures that
+// these methods are invoked in a synchronous fashion. It also ensures that
+// these methods are invoked in the order in which the updates were received.
func (ccb *ccBalancerWrapper) watcher() {
for {
select {
- case t := <-ccb.updateCh.Get():
+ case u := <-ccb.updateCh.Get():
ccb.updateCh.Load()
if ccb.closed.HasFired() {
break
}
- switch u := t.(type) {
+ switch update := u.(type) {
+ case *ccStateUpdate:
+ ccb.handleClientConnStateChange(update.ccs)
case *scStateUpdate:
- ccb.balancerMu.Lock()
- ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err})
- ccb.balancerMu.Unlock()
- case *acBalancerWrapper:
- ccb.mu.Lock()
- if ccb.subConns != nil {
- delete(ccb.subConns, u)
- ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain)
- }
- ccb.mu.Unlock()
- case exitIdle:
- if ccb.cc.GetState() == connectivity.Idle {
- if ei, ok := ccb.balancer.(balancer.ExitIdler); ok {
- // We already checked that the balancer implements
- // ExitIdle before pushing the event to updateCh, but
- // check conditionally again as defensive programming.
- ccb.balancerMu.Lock()
- ei.ExitIdle()
- ccb.balancerMu.Unlock()
- }
- }
+ ccb.handleSubConnStateChange(update)
+ case *exitIdleUpdate:
+ ccb.handleExitIdle()
+ case *resolverErrorUpdate:
+ ccb.handleResolverError(update.err)
+ case *switchToUpdate:
+ ccb.handleSwitchTo(update.name)
+ case *subConnUpdate:
+ ccb.handleRemoveSubConn(update.acbw)
default:
- logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t)
+ logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update)
}
case <-ccb.closed.Done():
}
if ccb.closed.HasFired() {
- ccb.balancerMu.Lock()
- ccb.balancer.Close()
- ccb.balancerMu.Unlock()
- ccb.mu.Lock()
- scs := ccb.subConns
- ccb.subConns = nil
- ccb.mu.Unlock()
- ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
- ccb.done.Fire()
- // Fire done before removing the addr conns. We can safely unblock
- // ccb.close and allow the removeAddrConns to happen
- // asynchronously.
- for acbw := range scs {
- ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
- }
+ ccb.handleClose()
return
}
}
}
-func (ccb *ccBalancerWrapper) close() {
- ccb.closed.Fire()
- <-ccb.done.Done()
+// updateClientConnState is invoked by grpc to push a ClientConnState update to
+// the underlying balancer.
+//
+// Unlike other methods invoked by grpc to push updates to the underlying
+// balancer, this method cannot simply push the update onto the update channel
+// and return. It needs to return the error returned by the underlying balancer
+// back to grpc which propagates that to the resolver.
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+ ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
+
+ var res interface{}
+ select {
+ case res = <-ccb.resultCh.Get():
+ ccb.resultCh.Load()
+ case <-ccb.closed.Done():
+ // Return early if the balancer wrapper is closed while we are waiting for
+ // the underlying balancer to process a ClientConnState update.
+ return nil
+ }
+ // If the returned error is nil, attempting to type assert to error leads to
+ // panic. So, this needs to be handled separately.
+ if res == nil {
+ return nil
+ }
+ return res.(error)
}
-func (ccb *ccBalancerWrapper) exitIdle() bool {
- if !ccb.hasExitIdle {
- return false
+// handleClientConnStateChange handles a ClientConnState update from the update
+// channel and invokes the appropriate method on the underlying balancer.
+//
+// If the addresses specified in the update contain addresses of type "grpclb"
+// and the selected LB policy is not "grpclb", these addresses will be filtered
+// out and ccs will be modified with the updated address list.
+func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) {
+ if ccb.curBalancerName != grpclbName {
+ // Filter any grpclb addresses since we don't have the grpclb balancer.
+ var addrs []resolver.Address
+ for _, addr := range ccs.ResolverState.Addresses {
+ if addr.Type == resolver.GRPCLB {
+ continue
+ }
+ addrs = append(addrs, addr)
+ }
+ ccs.ResolverState.Addresses = addrs
}
- ccb.updateCh.Put(exitIdle{})
- return true
+ ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs))
}
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
+// updateSubConnState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
// When updating addresses for a SubConn, if the address in use is not in
// the new addresses, the old ac will be tearDown() and a new ac will be
// created. tearDown() generates a state change with Shutdown state, we
@@ -161,44 +209,125 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
})
}
-func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
- ccb.balancerMu.Lock()
- defer ccb.balancerMu.Unlock()
- return ccb.balancer.UpdateClientConnState(*ccs)
+// handleSubConnStateChange handles a SubConnState update from the update
+// channel and invokes the appropriate method on the underlying balancer.
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) {
+ ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err})
+}
+
+func (ccb *ccBalancerWrapper) exitIdle() {
+ ccb.updateCh.Put(&exitIdleUpdate{})
+}
+
+func (ccb *ccBalancerWrapper) handleExitIdle() {
+ if ccb.cc.GetState() != connectivity.Idle {
+ return
+ }
+ ccb.balancer.ExitIdle()
}
func (ccb *ccBalancerWrapper) resolverError(err error) {
- ccb.balancerMu.Lock()
- defer ccb.balancerMu.Unlock()
+ ccb.updateCh.Put(&resolverErrorUpdate{err: err})
+}
+
+func (ccb *ccBalancerWrapper) handleResolverError(err error) {
ccb.balancer.ResolverError(err)
}
+// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
+// LB policy identified by name.
+//
+// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
+// first good update from the name resolver, it determines the LB policy to use
+// and invokes the switchTo() method. Upon receipt of every subsequent update
+// from the name resolver, it invokes this method.
+//
+// The ccBalancerWrapper keeps track of the current LB policy name, and skips
+// the graceful balancer switching process if the name does not change.
+func (ccb *ccBalancerWrapper) switchTo(name string) {
+ ccb.updateCh.Put(&switchToUpdate{name: name})
+}
+
+// handleSwitchTo handles a balancer switch update from the update channel. It
+// calls the SwitchTo() method on the gracefulswitch.Balancer with a
+// balancer.Builder corresponding to name. If no balancer.Builder is registered
+// for the given name, it uses the default LB policy, which is "pick_first".
+func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
+ // TODO: Other languages use case-insensitive balancer registries. We should
+ // switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
+ if strings.EqualFold(ccb.curBalancerName, name) {
+ return
+ }
+
+ // TODO: Ensure that name is a registered LB policy when we get here.
+ // We currently only validate the `loadBalancingConfig` field. We need to do
+ // the same for the `loadBalancingPolicy` field and reject the service config
+ // if the specified policy is not registered.
+ builder := balancer.Get(name)
+ if builder == nil {
+ channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
+ builder = newPickfirstBuilder()
+ } else {
+ channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
+ }
+
+ if err := ccb.balancer.SwitchTo(builder); err != nil {
+ channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
+ return
+ }
+ ccb.curBalancerName = builder.Name()
+}
+
+// handleRemoveSubConn handles a request from the underlying balancer to remove
+// a subConn.
+//
+// See comments in RemoveSubConn() for more details.
+func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
+ ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+}
+
+func (ccb *ccBalancerWrapper) close() {
+ ccb.closed.Fire()
+ <-ccb.done.Done()
+}
+
+func (ccb *ccBalancerWrapper) handleClose() {
+ ccb.balancer.Close()
+ ccb.done.Fire()
+}
+
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
if len(addrs) <= 0 {
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
}
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
- if ccb.subConns == nil {
- return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
- }
ac, err := ccb.cc.newAddrConn(addrs, opts)
if err != nil {
+ channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
return nil, err
}
acbw := &acBalancerWrapper{ac: ac}
acbw.ac.mu.Lock()
ac.acbw = acbw
acbw.ac.mu.Unlock()
- ccb.subConns[acbw] = struct{}{}
return acbw, nil
}
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
- // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock
- // during switchBalancer() if the old balancer calls RemoveSubConn() in its
- // Close().
- ccb.updateCh.Put(sc)
+ // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
+ // was required to handle the RemoveSubConn() method asynchronously by pushing
+ // the update onto the update channel. This was done to avoid a deadlock as
+ // switchBalancer() was holding cc.mu when calling Close() on the old
+ // balancer, which would in turn call RemoveSubConn().
+ //
+ // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
+ // asynchronously is probably not required anymore since the switchTo() method
+ // handles the balancer switch by pushing the update onto the channel.
+ // TODO(easwars): Handle this inline.
+ acbw, ok := sc.(*acBalancerWrapper)
+ if !ok {
+ return
+ }
+ ccb.updateCh.Put(&subConnUpdate{acbw: acbw})
}
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
@@ -210,11 +339,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
}
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
- if ccb.subConns == nil {
- return
- }
// Update picker before updating state. Even though the ordering here does
// not matter, it can lead to multiple calls of Pick in the common start-up
// case where we wait for ready and then perform an RPC. If the picker is
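The rewrite above funnels every ClientConn-to-balancer call through the single watcher goroutine, which is what lets it drop `balancerMu`: ordering comes from the channel rather than a mutex, and only `updateClientConnState` blocks on `resultCh` because its error must travel back to the resolver. A stripped-down sketch of that pattern, with plain channels standing in for grpc's internal `buffer.Unbounded` and `grpcsync.Event`:

```go
package main

import "fmt"

type stateUpdate struct{ ccs string }

type wrapper struct {
	updateCh chan interface{}
	resultCh chan error
	done     chan struct{}
}

func newWrapper() *wrapper {
	w := &wrapper{
		updateCh: make(chan interface{}, 16),
		resultCh: make(chan error, 1),
		done:     make(chan struct{}),
	}
	go w.watcher()
	return w
}

// watcher serializes all handler calls on one goroutine, in arrival order,
// so the handlers themselves need no locking.
func (w *wrapper) watcher() {
	defer close(w.done)
	for u := range w.updateCh {
		switch u := u.(type) {
		case stateUpdate:
			// Only this update type reports its result back to the caller.
			w.resultCh <- w.handleState(u.ccs)
		}
	}
}

func (w *wrapper) handleState(ccs string) error {
	fmt.Println("handled:", ccs)
	return nil
}

// updateState mirrors updateClientConnState: push, then block for the error.
func (w *wrapper) updateState(ccs string) error {
	w.updateCh <- stateUpdate{ccs: ccs}
	return <-w.resultCh
}

func main() {
	w := newWrapper()
	fmt.Println(w.updateState("resolver update"))
	close(w.updateCh)
	<-w.done
}
```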
diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go
new file mode 100644
index 000000000..a220c47c5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/channelz/channelz.go
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz exports internals of the channelz implementation as required
+// by other gRPC packages.
+//
+// The implementation of the channelz spec as defined in
+// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
+// the `internal/channelz` package.
+//
+// Experimental
+//
+// Notice: All APIs in this package are experimental and may be removed in a
+// later release.
+package channelz
+
+import "google.golang.org/grpc/internal/channelz"
+
+// Identifier is an opaque identifier which uniquely identifies an entity in the
+// channelz database.
+type Identifier = channelz.Identifier
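The new file re-exports the internal type with an alias (`=`), not a named type, so identifiers minted by `internal/channelz` flow through the public name with no conversion. A self-contained toy showing the alias semantics (the names are stand-ins, not the grpc packages):

```go
package main

import "fmt"

type internalID struct{ n int64 } // stands in for internal/channelz.Identifier

// ID is an alias, not a definition: *ID and *internalID are the same type.
type ID = internalID

func mint() *internalID { return &internalID{n: 42} } // "internal" constructor

func main() {
	var id *ID = mint() // no conversion needed across the package boundary
	fmt.Println(id.n)
}
```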
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 28f09dc87..de6d41c23 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -79,7 +79,7 @@ var (
// errNoTransportSecurity indicates that there is no transport security
// being set for ClientConn. Users should either set one or explicitly
// call WithInsecure DialOption to disable security.
- errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
+ errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)")
// errTransportCredsAndBundle indicates that creds bundle is used together
// with other individual Transport Credentials.
errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
@@ -159,23 +159,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}()
- if channelz.IsOn() {
- if cc.dopts.channelzParentID != 0 {
- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
- channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{
- Desc: "Channel Created",
- Severity: channelz.CtInfo,
- Parent: &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
- Severity: channelz.CtInfo,
- },
- })
- } else {
- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
- channelz.Info(logger, cc.channelzID, "Channel Created")
+ pid := cc.dopts.channelzParentID
+ cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target)
+ ted := &channelz.TraceEventDesc{
+ Desc: "Channel created",
+ Severity: channelz.CtInfo,
+ }
+ if cc.dopts.channelzParentID != nil {
+ ted.Parent = &channelz.TraceEventDesc{
+ Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()),
+ Severity: channelz.CtInfo,
}
- cc.csMgr.channelzID = cc.channelzID
}
+ channelz.AddTraceEvent(logger, cc.channelzID, 1, ted)
+ cc.csMgr.channelzID = cc.channelzID
if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
return nil, errNoTransportSecurity
@@ -281,7 +278,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if creds := cc.dopts.copts.TransportCredentials; creds != nil {
credsClone = creds.Clone()
}
- cc.balancerBuildOpts = balancer.BuildOptions{
+ cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
DialCreds: credsClone,
CredsBundle: cc.dopts.copts.CredsBundle,
Dialer: cc.dopts.copts.Dialer,
@@ -289,7 +286,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
CustomUserAgent: cc.dopts.copts.UserAgent,
ChannelzParentID: cc.channelzID,
Target: cc.parsedTarget,
- }
+ })
// Build the resolver.
rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
@@ -398,7 +395,7 @@ type connectivityStateManager struct {
mu sync.Mutex
state connectivity.State
notifyChan chan struct{}
- channelzID int64
+ channelzID *channelz.Identifier
}
// updateState updates the connectivity.State of ClientConn.
@@ -464,34 +461,36 @@ var _ ClientConnInterface = (*ClientConn)(nil)
// handshakes. It also handles errors on established connections by
// re-resolving the name and reconnecting.
type ClientConn struct {
- ctx context.Context
- cancel context.CancelFunc
-
- target string
- parsedTarget resolver.Target
- authority string
- dopts dialOptions
- csMgr *connectivityStateManager
-
- balancerBuildOpts balancer.BuildOptions
- blockingpicker *pickerWrapper
-
+ ctx context.Context // Initialized using the background context at dial time.
+ cancel context.CancelFunc // Cancelled on close.
+
+ // The following are initialized at dial time, and are read-only after that.
+ target string // User's dial target.
+ parsedTarget resolver.Target // See parseTargetAndFindResolver().
+ authority string // See determineAuthority().
+ dopts dialOptions // Default and user specified dial options.
+ channelzID *channelz.Identifier // Channelz identifier for the channel.
+ balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath.
+
+ // The following provide their own synchronization, and therefore don't
+ // require cc.mu to be held to access them.
+ csMgr *connectivityStateManager
+ blockingpicker *pickerWrapper
safeConfigSelector iresolver.SafeConfigSelector
+ czData *channelzData
+ retryThrottler atomic.Value // Updated from service config.
- mu sync.RWMutex
- resolverWrapper *ccResolverWrapper
- sc *ServiceConfig
- conns map[*addrConn]struct{}
- // Keepalive parameter can be updated if a GoAway is received.
- mkp keepalive.ClientParameters
- curBalancerName string
- balancerWrapper *ccBalancerWrapper
- retryThrottler atomic.Value
-
+ // firstResolveEvent is used to track whether the name resolver sent us at
+ // least one update. RPCs block on this event.
firstResolveEvent *grpcsync.Event
- channelzID int64 // channelz unique identification number
- czData *channelzData
+ // mu protects the following fields.
+ // TODO: split mu so the same mutex isn't used for everything.
+ mu sync.RWMutex
+ resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close.
+ sc *ServiceConfig // Latest service config received from the resolver.
+ conns map[*addrConn]struct{} // Set to nil on close.
+ mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
lceMu sync.Mutex // protects lastConnectionError
lastConnectionError error
@@ -536,14 +535,7 @@ func (cc *ClientConn) GetState() connectivity.State {
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
func (cc *ClientConn) Connect() {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() {
- return
- }
- for ac := range cc.conns {
- go ac.connect()
- }
+ cc.balancerWrapper.exitIdle()
}
func (cc *ClientConn) scWatcher() {
@@ -623,9 +615,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
// with the new addresses.
cc.maybeApplyDefaultServiceConfig(nil)
- if cc.balancerWrapper != nil {
- cc.balancerWrapper.resolverError(err)
- }
+ cc.balancerWrapper.resolverError(err)
// No addresses are valid with err set; return early.
cc.mu.Unlock()
@@ -653,16 +643,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses)
} else {
ret = balancer.ErrBadResolverState
- if cc.balancerWrapper == nil {
- var err error
- if s.ServiceConfig.Err != nil {
- err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
- } else {
- err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
- }
- cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc})
- cc.blockingpicker.updatePicker(base.NewErrPicker(err))
- cc.csMgr.updateState(connectivity.TransientFailure)
+ if cc.sc == nil {
+ // Apply the failing LB only if we haven't received valid service config
+ // from the name resolver in the past.
+ cc.applyFailingLB(s.ServiceConfig)
cc.mu.Unlock()
return ret
}
@@ -670,24 +654,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
}
var balCfg serviceconfig.LoadBalancingConfig
- if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
+ if cc.sc != nil && cc.sc.lbConfig != nil {
balCfg = cc.sc.lbConfig.cfg
}
-
- cbn := cc.curBalancerName
bw := cc.balancerWrapper
cc.mu.Unlock()
- if cbn != grpclbName {
- // Filter any grpclb addresses since we don't have the grpclb balancer.
- for i := 0; i < len(s.Addresses); {
- if s.Addresses[i].Type == resolver.GRPCLB {
- copy(s.Addresses[i:], s.Addresses[i+1:])
- s.Addresses = s.Addresses[:len(s.Addresses)-1]
- continue
- }
- i++
- }
- }
+
uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
if ret == nil {
ret = uccsErr // prefer ErrBadResolver state since any other error is
@@ -696,56 +668,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
return ret
}
-// switchBalancer starts the switching from current balancer to the balancer
-// with the given name.
-//
-// It will NOT send the current address list to the new balancer. If needed,
-// caller of this function should send address list to the new balancer after
-// this function returns.
+// applyFailingLB is akin to configuring an LB policy on the channel which
+// always fails RPCs. Here, an actual LB policy is not configured, but an always
+// erroring picker is configured, which returns errors with information about
+// what was invalid in the received service config. A config selector with no
+// service config is configured, and the connectivity state of the channel is
+// set to TransientFailure.
//
// Caller must hold cc.mu.
-func (cc *ClientConn) switchBalancer(name string) {
- if strings.EqualFold(cc.curBalancerName, name) {
- return
- }
-
- channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name)
- if cc.dopts.balancerBuilder != nil {
- channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
- return
- }
- if cc.balancerWrapper != nil {
- // Don't hold cc.mu while closing the balancers. The balancers may call
- // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex
- // would cause a deadlock in that case.
- cc.mu.Unlock()
- cc.balancerWrapper.close()
- cc.mu.Lock()
- }
-
- builder := balancer.Get(name)
- if builder == nil {
- channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
- channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
- builder = newPickfirstBuilder()
+func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) {
+ var err error
+ if sc.Err != nil {
+ err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
} else {
- channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name)
+ err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
}
-
- cc.curBalancerName = builder.Name()
- cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+ cc.blockingpicker.updatePicker(base.NewErrPicker(err))
+ cc.csMgr.updateState(connectivity.TransientFailure)
}
func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
- cc.mu.Lock()
- if cc.conns == nil {
- cc.mu.Unlock()
- return
- }
- // TODO(bar switching) send updates to all balancer wrappers when balancer
- // gracefully switching is supported.
- cc.balancerWrapper.handleSubConnStateChange(sc, s, err)
- cc.mu.Unlock()
+ cc.balancerWrapper.updateSubConnState(sc, s, err)
}
// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
@@ -768,17 +712,21 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
cc.mu.Unlock()
return nil, ErrClientConnClosing
}
- if channelz.IsOn() {
- ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
- channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
- Desc: "Subchannel Created",
- Severity: channelz.CtInfo,
- Parent: &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
- Severity: channelz.CtInfo,
- },
- })
+
+ var err error
+ ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "")
+ if err != nil {
+ return nil, err
}
+ channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
+ Desc: "Subchannel created",
+ Severity: channelz.CtInfo,
+ Parent: &channelz.TraceEventDesc{
+ Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()),
+ Severity: channelz.CtInfo,
+ },
+ })
+
cc.conns[ac] = struct{}{}
cc.mu.Unlock()
return ac, nil
@@ -853,16 +801,31 @@ func (ac *addrConn) connect() error {
return nil
}
+func equalAddresses(a, b []resolver.Address) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if !v.Equal(b[i]) {
+ return false
+ }
+ }
+ return true
+}
+
// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
//
-// If ac is Connecting, it returns false. The caller should tear down the ac and
-// create a new one. Note that the backoff will be reset when this happens.
-//
// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
// addresses will be picked up by retry in the next iteration after backoff.
//
// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
//
+// If the new addresses are the same as the old list, it does nothing and returns
+// true.
+//
+// If ac is Connecting, it returns false. The caller should tear down the ac and
+// create a new one. Note that the backoff will be reset when this happens.
+//
// If ac is Ready, it checks whether current connected address of ac is in the
// new addrs list.
// - If true, it updates ac.addrs and returns true. The ac will keep using
@@ -879,6 +842,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
return true
}
+ if equalAddresses(ac.addrs, addrs) {
+ return true
+ }
+
if ac.state == connectivity.Connecting {
return false
}
@@ -959,14 +926,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
}
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
- t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
+ return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
Ctx: ctx,
FullMethodName: method,
})
- if err != nil {
- return nil, nil, toRPCErr(err)
- }
- return t, done, nil
}
func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
@@ -991,35 +954,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel
cc.retryThrottler.Store((*retryThrottler)(nil))
}
- if cc.dopts.balancerBuilder == nil {
- // Only look at balancer types and switch balancer if balancer dial
- // option is not set.
- var newBalancerName string
- if cc.sc != nil && cc.sc.lbConfig != nil {
- newBalancerName = cc.sc.lbConfig.name
- } else {
- var isGRPCLB bool
- for _, a := range addrs {
- if a.Type == resolver.GRPCLB {
- isGRPCLB = true
- break
- }
- }
- if isGRPCLB {
- newBalancerName = grpclbName
- } else if cc.sc != nil && cc.sc.LB != nil {
- newBalancerName = *cc.sc.LB
- } else {
- newBalancerName = PickFirstBalancerName
+ var newBalancerName string
+ if cc.sc != nil && cc.sc.lbConfig != nil {
+ newBalancerName = cc.sc.lbConfig.name
+ } else {
+ var isGRPCLB bool
+ for _, a := range addrs {
+ if a.Type == resolver.GRPCLB {
+ isGRPCLB = true
+ break
}
}
- cc.switchBalancer(newBalancerName)
- } else if cc.balancerWrapper == nil {
- // Balancer dial option was set, and this is the first time handling
- // resolved addresses. Build a balancer with dopts.balancerBuilder.
- cc.curBalancerName = cc.dopts.balancerBuilder.Name()
- cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
+ if isGRPCLB {
+ newBalancerName = grpclbName
+ } else if cc.sc != nil && cc.sc.LB != nil {
+ newBalancerName = *cc.sc.LB
+ } else {
+ newBalancerName = PickFirstBalancerName
+ }
}
+ cc.balancerWrapper.switchTo(newBalancerName)
}
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
@@ -1070,11 +1024,11 @@ func (cc *ClientConn) Close() error {
rWrapper := cc.resolverWrapper
cc.resolverWrapper = nil
bWrapper := cc.balancerWrapper
- cc.balancerWrapper = nil
cc.mu.Unlock()
+ // The order of closing matters here since the balancer wrapper assumes the
+ // picker is closed before it is closed.
cc.blockingpicker.close()
-
if bWrapper != nil {
bWrapper.close()
}
@@ -1085,22 +1039,22 @@ func (cc *ClientConn) Close() error {
for ac := range conns {
ac.tearDown(ErrClientConnClosing)
}
- if channelz.IsOn() {
- ted := &channelz.TraceEventDesc{
- Desc: "Channel Deleted",
+ ted := &channelz.TraceEventDesc{
+ Desc: "Channel deleted",
+ Severity: channelz.CtInfo,
+ }
+ if cc.dopts.channelzParentID != nil {
+ ted.Parent = &channelz.TraceEventDesc{
+ Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()),
Severity: channelz.CtInfo,
}
- if cc.dopts.channelzParentID != 0 {
- ted.Parent = &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
- Severity: channelz.CtInfo,
- }
- }
- channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
- // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
- // the entity being deleted, and thus prevent it from being deleted right away.
- channelz.RemoveEntry(cc.channelzID)
}
+ channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
+ // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+ // trace reference to the entity being deleted, and thus prevent it from being
+ // deleted right away.
+ channelz.RemoveEntry(cc.channelzID)
+
return nil
}
@@ -1130,7 +1084,7 @@ type addrConn struct {
backoffIdx int // Needs to be stateful for resetConnectBackoff.
resetBackoff chan struct{}
- channelzID int64 // channelz unique identification number.
+ channelzID *channelz.Identifier
czData *channelzData
}
@@ -1284,6 +1238,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
ac.mu.Lock()
defer ac.mu.Unlock()
defer connClosed.Fire()
+ defer hcancel()
if !hcStarted || hctx.Err() != nil {
// We didn't start the health check or set the state to READY, so
// no need to do anything else here.
@@ -1294,7 +1249,6 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
// state, since there may be a new transport in this addrConn.
return
}
- hcancel()
ac.transport = nil
// Refresh the name resolver
ac.cc.resolveNow(resolver.ResolveNowOptions{})
@@ -1312,14 +1266,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
defer cancel()
- if channelz.IsOn() {
- copts.ChannelzParentID = ac.channelzID
- }
+ copts.ChannelzParentID = ac.channelzID
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose)
if err != nil {
// newTr is either nil, or closed.
- channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err)
+ hcancel()
+ channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
return err
}
@@ -1332,7 +1285,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
newTr.Close(transport.ErrConnClosing)
if connectCtx.Err() == context.DeadlineExceeded {
err := errors.New("failed to receive server preface within timeout")
- channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err)
+ channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err)
return err
}
return nil
@@ -1497,19 +1450,18 @@ func (ac *addrConn) tearDown(err error) {
curTr.GracefulClose()
ac.mu.Lock()
}
- if channelz.IsOn() {
- channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
- Desc: "Subchannel Deleted",
+ channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
+ Desc: "Subchannel deleted",
+ Severity: channelz.CtInfo,
+ Parent: &channelz.TraceEventDesc{
+ Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()),
Severity: channelz.CtInfo,
- Parent: &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
- Severity: channelz.CtInfo,
- },
- })
- // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
- // the entity being deleted, and thus prevent it from being deleted right away.
- channelz.RemoveEntry(ac.channelzID)
- }
+ },
+ })
+ // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+ // trace reference to the entity being deleted, and thus prevent it from
+ // being deleted right away.
+ channelz.RemoveEntry(ac.channelzID)
ac.mu.Unlock()
}
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
index 4fbed1256..82bee1443 100644
--- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -70,3 +70,29 @@ type info struct {
func (info) AuthType() string {
return "insecure"
}
+
+// insecureBundle implements an insecure bundle.
+// An insecure bundle provides a thin wrapper around insecureTC to support
+// the credentials.Bundle interface.
+type insecureBundle struct{}
+
+// NewBundle returns a bundle with disabled transport security and no per-RPC credentials.
+func NewBundle() credentials.Bundle {
+ return insecureBundle{}
+}
+
+// NewWithMode returns a new insecure Bundle. The mode is ignored.
+func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
+ return insecureBundle{}, nil
+}
+
+// PerRPCCredentials returns a nil implementation, as the insecure
+// bundle does not support per-RPC credentials.
+func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
+ return nil
+}
+
+// TransportCredentials returns the underlying insecure transport credential.
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
+ return NewCredentials()
+}
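The new insecureBundle rounds out the insecure package so it can satisfy credentials.Bundle; for most callers the visible API is still NewCredentials, which replaces the deprecated WithInsecure. A minimal dial sketch, with a placeholder address:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// "localhost:50051" is a placeholder target.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```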
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index c4bf09f9e..f2f605a17 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -20,12 +20,11 @@ package grpc
import (
"context"
- "fmt"
"net"
"time"
"google.golang.org/grpc/backoff"
- "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/channelz"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/internal"
@@ -45,19 +44,17 @@ type dialOptions struct {
chainUnaryInts []UnaryClientInterceptor
chainStreamInts []StreamClientInterceptor
- cp Compressor
- dc Decompressor
- bs internalbackoff.Strategy
- block bool
- returnLastError bool
- timeout time.Duration
- scChan <-chan ServiceConfig
- authority string
- copts transport.ConnectOptions
- callOptions []CallOption
- // This is used by WithBalancerName dial option.
- balancerBuilder balancer.Builder
- channelzParentID int64
+ cp Compressor
+ dc Decompressor
+ bs internalbackoff.Strategy
+ block bool
+ returnLastError bool
+ timeout time.Duration
+ scChan <-chan ServiceConfig
+ authority string
+ copts transport.ConnectOptions
+ callOptions []CallOption
+ channelzParentID *channelz.Identifier
disableServiceConfig bool
disableRetry bool
disableHealthCheck bool
@@ -195,25 +192,6 @@ func WithDecompressor(dc Decompressor) DialOption {
})
}
-// WithBalancerName sets the balancer that the ClientConn will be initialized
-// with. Balancer registered with balancerName will be used. This function
-// panics if no balancer was registered by balancerName.
-//
-// The balancer cannot be overridden by balancer option specified by service
-// config.
-//
-// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
-// instead. Will be removed in a future 1.x release.
-func WithBalancerName(balancerName string) DialOption {
- builder := balancer.Get(balancerName)
- if builder == nil {
- panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
- }
- return newFuncDialOption(func(o *dialOptions) {
- o.balancerBuilder = builder
- })
-}
-
// WithServiceConfig returns a DialOption which has a channel to read the
// service configuration.
//
@@ -304,8 +282,8 @@ func WithReturnConnectionError() DialOption {
// WithCredentialsBundle or WithPerRPCCredentials) which require transport
// security is incompatible and will cause grpc.Dial() to fail.
//
-// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead.
-// Will be supported throughout 1.x.
+// Deprecated: use WithTransportCredentials and insecure.NewCredentials()
+// instead. Will be supported throughout 1.x.
func WithInsecure() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.TransportCredentials = insecure.NewCredentials()
@@ -498,7 +476,7 @@ func WithAuthority(a string) DialOption {
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
-func WithChannelzParentID(id int64) DialOption {
+func WithChannelzParentID(id *channelz.Identifier) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.channelzParentID = id
})
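With `WithBalancerName` deleted, the way to pin an LB policy is the one its deprecation note pointed at: set it through the default service config, optionally with `WithDisableServiceConfig` to ignore resolver-supplied configs. A sketch selecting `round_robin`; the address is a placeholder:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
		// grpc.WithDisableServiceConfig(), // uncomment to also ignore resolver configs
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```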
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 6d84f74c7..18e530fc9 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec)
// more details.
//
// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Compressors are
+// an init() function), and is not thread-safe. If multiple Codecs are
// registered with the same name, the one registered last will take effect.
func RegisterCodec(codec Codec) {
if codec == nil {
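The corrected comment is the whole contract: registration must happen from `init()` because the codec registry is a plain map with no locking. A sketch of a toy raw-bytes codec registered that way (illustrative, not a real wire format):

```go
package rawcodec

import (
	"fmt"

	"google.golang.org/grpc/encoding"
)

type rawCodec struct{}

func (rawCodec) Marshal(v interface{}) ([]byte, error) {
	b, ok := v.([]byte)
	if !ok {
		return nil, fmt.Errorf("rawcodec: expected []byte, got %T", v)
	}
	return b, nil
}

func (rawCodec) Unmarshal(data []byte, v interface{}) error {
	p, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawcodec: expected *[]byte, got %T", v)
	}
	*p = data
	return nil
}

func (rawCodec) Name() string { return "raw" }

func init() {
	// Safe only at init time: RegisterCodec is not synchronized.
	encoding.RegisterCodec(rawCodec{})
}
```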
diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod
index fcffdceef..6a760ed74 100644
--- a/vendor/google.golang.org/grpc/go.mod
+++ b/vendor/google.golang.org/grpc/go.mod
@@ -6,14 +6,14 @@ require (
github.com/cespare/xxhash/v2 v2.1.1
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1
- github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021
+ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
- github.com/golang/protobuf v1.4.3
- github.com/google/go-cmp v0.5.0
+ github.com/golang/protobuf v1.5.2
+ github.com/google/go-cmp v0.5.6
github.com/google/uuid v1.1.2
- golang.org/x/net v0.0.0-20200822124328-c89045814202
+ golang.org/x/net v0.0.0-20201021035429-f5854403a974
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
- golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
+ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
- google.golang.org/protobuf v1.25.0
+ google.golang.org/protobuf v1.27.1
)
diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum
index 8b542e0be..5f418dba1 100644
--- a/vendor/google.golang.org/grpc/go.sum
+++ b/vendor/google.golang.org/grpc/go.sum
@@ -12,8 +12,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
@@ -22,8 +22,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 h1:fP+fF0up6oPY49OrjPrhIJ8yQfdIM85NXMLkMg1EXVs=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -40,14 +40,18 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
@@ -72,8 +76,9 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -84,10 +89,14 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -117,8 +126,11 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
index 668e0adcf..bb96ef57b 100644
--- a/vendor/google.golang.org/grpc/interceptor.go
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -72,9 +72,12 @@ type UnaryServerInfo struct {
}
// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
-// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
-// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
-// the status message of the RPC.
+// execution of a unary RPC.
+//
+// If a UnaryHandler returns an error, it should either be produced by the
+// status package, or be one of the context errors. Otherwise, gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message of the
+// RPC.
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
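The revised comment distinguishes status-package errors and context errors from everything else. A minimal sketch of a handler written to that guidance, using the public status and codes packages (the handler body and messages are illustrative, not part of this patch):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// handler follows the UnaryHandler guidance: errors carry an explicit status
// (or are context errors), so gRPC never falls back to codes.Unknown.
func handler(ctx context.Context, req interface{}) (interface{}, error) {
	if req == nil {
		// Produced by the status package: surfaces as InvalidArgument.
		return nil, status.Error(codes.InvalidArgument, "request must not be nil")
	}
	if err := ctx.Err(); err != nil {
		// A context error: mapped to Canceled or DeadlineExceeded.
		return nil, err
	}
	return "ok", nil
}

func main() {
	resp, err := handler(context.Background(), "req")
	fmt.Println(resp, err)
}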
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
new file mode 100644
index 000000000..7ba8f4d18
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -0,0 +1,382 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package gracefulswitch implements a graceful switch load balancer.
+package gracefulswitch
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/resolver"
+)
+
+var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
+var _ balancer.Balancer = (*Balancer)(nil)
+
+// NewBalancer returns a graceful switch Balancer.
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
+ return &Balancer{
+ cc: cc,
+ bOpts: opts,
+ }
+}
+
+// Balancer is a utility to gracefully switch from one balancer to
+// a new balancer. It implements the balancer.Balancer interface.
+type Balancer struct {
+ bOpts balancer.BuildOptions
+ cc balancer.ClientConn
+
+ // mu protects the following fields and all fields within balancerCurrent
+ // and balancerPending. mu does not need to be held when calling into the
+ // child balancers, as all calls into these children happen only as a direct
+ // result of calls into the gracefulSwitchBalancer, which are themselves
+ // guaranteed to be synchronous. There is one exception: an UpdateState call
+ // from a child balancer when current and pending are populated can lead to
+ // calling Close() on the current. To prevent that racing with an
+ // UpdateSubConnState from the channel, we hold currentMu during Close and
+ // UpdateSubConnState calls.
+ mu sync.Mutex
+ balancerCurrent *balancerWrapper
+ balancerPending *balancerWrapper
+ closed bool // set to true when this balancer is closed
+
+ // currentMu must be locked before mu. This mutex guards against this
+ // sequence of events: UpdateSubConnState() called, finds the
+ // balancerCurrent, gives up lock, updateState comes in, causes Close() on
+ // balancerCurrent before the UpdateSubConnState is called on the
+ // balancerCurrent.
+ currentMu sync.Mutex
+}
+
+// swap swaps out the current lb with the pending lb and updates the ClientConn.
+// The caller must hold gsb.mu.
+func (gsb *Balancer) swap() {
+ gsb.cc.UpdateState(gsb.balancerPending.lastState)
+ cur := gsb.balancerCurrent
+ gsb.balancerCurrent = gsb.balancerPending
+ gsb.balancerPending = nil
+ go func() {
+ gsb.currentMu.Lock()
+ defer gsb.currentMu.Unlock()
+ cur.Close()
+ }()
+}
+
+// Helper function that checks if the balancer passed in is current or pending.
+// The caller must hold gsb.mu.
+func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
+ return bw == gsb.balancerCurrent || bw == gsb.balancerPending
+}
+
+// SwitchTo initializes the graceful switch process, which completes based on
+// connectivity state changes on the current/pending balancer. Thus, the switch
+// process is not complete when this method returns. This method must be called
+// synchronously alongside the rest of the balancer.Balancer methods this
+// Graceful Switch Balancer implements.
+func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
+ gsb.mu.Lock()
+ if gsb.closed {
+ gsb.mu.Unlock()
+ return errBalancerClosed
+ }
+ bw := &balancerWrapper{
+ gsb: gsb,
+ lastState: balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable),
+ },
+ subconns: make(map[balancer.SubConn]bool),
+ }
+ balToClose := gsb.balancerPending // nil if there is no pending balancer
+ if gsb.balancerCurrent == nil {
+ gsb.balancerCurrent = bw
+ } else {
+ gsb.balancerPending = bw
+ }
+ gsb.mu.Unlock()
+ balToClose.Close()
+ // This function takes a builder instead of a balancer because builder.Build
+ // can call back inline, and this utility needs to handle the callbacks.
+ newBalancer := builder.Build(bw, gsb.bOpts)
+ if newBalancer == nil {
+ // This is illegal and should never happen; we clear the balancerWrapper
+ // we were constructing if it happens, to avoid a potential panic.
+ gsb.mu.Lock()
+ if gsb.balancerPending != nil {
+ gsb.balancerPending = nil
+ } else {
+ gsb.balancerCurrent = nil
+ }
+ gsb.mu.Unlock()
+ return balancer.ErrBadResolverState
+ }
+
+ // This write doesn't need to take gsb.mu because this field never gets read
+ // or written to on any calls from the current or pending. Calls from grpc
+ // to this balancer are guaranteed to be called synchronously, so this
+ // bw.Balancer field will never be forwarded to until this SwitchTo()
+ // function returns.
+ bw.Balancer = newBalancer
+ return nil
+}
+
+// Returns nil if the graceful switch balancer is closed.
+func (gsb *Balancer) latestBalancer() *balancerWrapper {
+ gsb.mu.Lock()
+ defer gsb.mu.Unlock()
+ if gsb.balancerPending != nil {
+ return gsb.balancerPending
+ }
+ return gsb.balancerCurrent
+}
+
+// UpdateClientConnState forwards the update to the latest balancer created.
+func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
+ // The resolver data is only relevant to the most recent LB Policy.
+ balToUpdate := gsb.latestBalancer()
+ if balToUpdate == nil {
+ return errBalancerClosed
+ }
+ // Perform this call without gsb.mu to prevent deadlocks if the child calls
+ // back into the channel. The latest balancer can never be closed during a
+ // call from the channel, even without gsb.mu held.
+ return balToUpdate.UpdateClientConnState(state)
+}
+
+// ResolverError forwards the error to the latest balancer created.
+func (gsb *Balancer) ResolverError(err error) {
+ // The resolver data is only relevant to the most recent LB Policy.
+ balToUpdate := gsb.latestBalancer()
+ if balToUpdate == nil {
+ return
+ }
+ // Perform this call without gsb.mu to prevent deadlocks if the child calls
+ // back into the channel. The latest balancer can never be closed during a
+ // call from the channel, even without gsb.mu held.
+ balToUpdate.ResolverError(err)
+}
+
+// ExitIdle forwards the call to the latest balancer created.
+//
+// If the latest balancer does not support ExitIdle, the subConns are
+// re-connected to manually.
+func (gsb *Balancer) ExitIdle() {
+ balToUpdate := gsb.latestBalancer()
+ if balToUpdate == nil {
+ return
+ }
+ // There is no need to protect this read with a mutex, as the write to the
+ // Balancer field happens in SwitchTo, which completes before this can be
+ // called.
+ if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
+ ei.ExitIdle()
+ return
+ }
+ for sc := range balToUpdate.subconns {
+ sc.Connect()
+ }
+}
+
+// UpdateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+ gsb.currentMu.Lock()
+ defer gsb.currentMu.Unlock()
+ gsb.mu.Lock()
+ // Forward update to the appropriate child. Even if there is a pending
+ // balancer, the current balancer should continue to get SubConn updates to
+ // maintain the proper state while the pending is still connecting.
+ var balToUpdate *balancerWrapper
+ if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
+ balToUpdate = gsb.balancerCurrent
+ } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
+ balToUpdate = gsb.balancerPending
+ }
+ gsb.mu.Unlock()
+ if balToUpdate == nil {
+ // SubConn belonged to a stale lb policy that has not yet fully closed,
+ // or the balancer was already closed.
+ return
+ }
+ balToUpdate.UpdateSubConnState(sc, state)
+}
+
+// Close closes any active child balancers.
+func (gsb *Balancer) Close() {
+ gsb.mu.Lock()
+ gsb.closed = true
+ currentBalancerToClose := gsb.balancerCurrent
+ gsb.balancerCurrent = nil
+ pendingBalancerToClose := gsb.balancerPending
+ gsb.balancerPending = nil
+ gsb.mu.Unlock()
+
+ currentBalancerToClose.Close()
+ pendingBalancerToClose.Close()
+}
+
+// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
+// methods to help cleanup SubConns created by the wrapped balancer.
+//
+// It implements the balancer.ClientConn interface and is passed down in that
+// capacity to the wrapped balancer. It maintains a set of subConns created by
+// the wrapped balancer and calls from the latter to create/update/remove
+// SubConns update this set before being forwarded to the parent ClientConn.
+// State updates from the wrapped balancer can result in invocation of the
+// graceful switch logic.
+type balancerWrapper struct {
+ balancer.Balancer
+ gsb *Balancer
+
+ lastState balancer.State
+ subconns map[balancer.SubConn]bool // subconns created by this balancer
+}
+
+func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+ if state.ConnectivityState == connectivity.Shutdown {
+ bw.gsb.mu.Lock()
+ delete(bw.subconns, sc)
+ bw.gsb.mu.Unlock()
+ }
+ // There is no need to protect this read with a mutex, as the write to the
+ // Balancer field happens in SwitchTo, which completes before this can be
+ // called.
+ bw.Balancer.UpdateSubConnState(sc, state)
+}
+
+// Close closes the underlying LB policy and removes the subconns it created. bw
+// must not be referenced via balancerCurrent or balancerPending in gsb when
+// called. gsb.mu must not be held. Does not panic with a nil receiver.
+func (bw *balancerWrapper) Close() {
+ // bw can be nil here: SwitchTo calls Close on the pending balancer even when there is none.
+ if bw == nil {
+ return
+ }
+ // There is no need to protect this read with a mutex, as Close() is
+ // impossible to be called concurrently with the write in SwitchTo(). The
+ // callsites of Close() for this balancer in Graceful Switch Balancer will
+ // never be called until SwitchTo() returns.
+ bw.Balancer.Close()
+ bw.gsb.mu.Lock()
+ for sc := range bw.subconns {
+ bw.gsb.cc.RemoveSubConn(sc)
+ }
+ bw.gsb.mu.Unlock()
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+ // Hold the mutex for this entire call to ensure it cannot occur
+ // concurrently with other updateState() calls. This causes updates to
+ // lastState and calls to cc.UpdateState to happen atomically.
+ bw.gsb.mu.Lock()
+ defer bw.gsb.mu.Unlock()
+ bw.lastState = state
+
+ if !bw.gsb.balancerCurrentOrPending(bw) {
+ return
+ }
+
+ if bw == bw.gsb.balancerCurrent {
+ // In the case that the current balancer exits READY, and there is a pending
+ // balancer, you can forward the pending balancer's cached State up to
+ // ClientConn and swap the pending into the current. This is because there
+ // is no reason to gracefully switch from and keep using the old policy as
+ // the ClientConn is not connected to any backends.
+ if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
+ bw.gsb.swap()
+ return
+ }
+ // Even if there is a pending balancer waiting to be gracefully switched to,
+ // continue to forward current balancer updates to the Client Conn. Ignoring
+ // state + picker from the current would cause undefined behavior/cause the
+ // system to behave incorrectly from the current LB policy's perspective.
+ // Also, the current LB is still being used by grpc to choose SubConns per
+ // RPC, and thus should use the most updated form of the current balancer.
+ bw.gsb.cc.UpdateState(state)
+ return
+ }
+ // This method is now dealing with a state update from the pending balancer.
+ // If the current balancer is currently in a state other than READY, the new
+ // policy can be swapped into place immediately. This is because there is no
+ // reason to gracefully switch from and keep using the old policy as the
+ // ClientConn is not connected to any backends.
+ if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
+ bw.gsb.swap()
+ }
+}
+
+func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+ bw.gsb.mu.Lock()
+ if !bw.gsb.balancerCurrentOrPending(bw) {
+ bw.gsb.mu.Unlock()
+ return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+ }
+ bw.gsb.mu.Unlock()
+
+ sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
+ if err != nil {
+ return nil, err
+ }
+ bw.gsb.mu.Lock()
+ if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
+ bw.gsb.cc.RemoveSubConn(sc)
+ bw.gsb.mu.Unlock()
+ return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+ }
+ bw.subconns[sc] = true
+ bw.gsb.mu.Unlock()
+ return sc, nil
+}
+
+func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
+ // Ignore ResolveNow requests from anything other than the most recent
+ // balancer, because older balancers were already removed from the config.
+ if bw != bw.gsb.latestBalancer() {
+ return
+ }
+ bw.gsb.cc.ResolveNow(opts)
+}
+
+func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+ bw.gsb.mu.Lock()
+ if !bw.gsb.balancerCurrentOrPending(bw) {
+ bw.gsb.mu.Unlock()
+ return
+ }
+ bw.gsb.mu.Unlock()
+ bw.gsb.cc.RemoveSubConn(sc)
+}
+
+func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+ bw.gsb.mu.Lock()
+ if !bw.gsb.balancerCurrentOrPending(bw) {
+ bw.gsb.mu.Unlock()
+ return
+ }
+ bw.gsb.mu.Unlock()
+ bw.gsb.cc.UpdateAddresses(sc, addrs)
+}
+
+func (bw *balancerWrapper) Target() string {
+ return bw.gsb.cc.Target()
+}
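The pending-balancer branch of UpdateState reduces to a single condition at the end of the function: promote the pending policy unless it is still Connecting while the current policy remains Ready. A standalone restatement of that rule (the function name is mine, not part of the patch):

package main

import (
	"fmt"

	"google.golang.org/grpc/connectivity"
)

// pendingShouldSwap restates the final condition in balancerWrapper.UpdateState
// for updates coming from the pending balancer.
func pendingShouldSwap(pending, current connectivity.State) bool {
	return pending != connectivity.Connecting || current != connectivity.Ready
}

func main() {
	fmt.Println(pendingShouldSwap(connectivity.Connecting, connectivity.Ready)) // false: keep current
	fmt.Println(pendingShouldSwap(connectivity.Ready, connectivity.Ready))      // true: swap
	fmt.Println(pendingShouldSwap(connectivity.Connecting, connectivity.Idle))  // true: current not Ready
}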
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
index 5cc3aeddb..0a25ce43f 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -31,7 +31,7 @@ import (
// Logger is the global binary logger. It can be used to get binary logger for
// each method.
type Logger interface {
- getMethodLogger(methodName string) *MethodLogger
+ GetMethodLogger(methodName string) MethodLogger
}
// binLogger is the global binary logger for the binary. One of this should be
@@ -49,17 +49,24 @@ func SetLogger(l Logger) {
binLogger = l
}
+// GetLogger returns the current binary logger.
+//
+// Only call this at init time.
+func GetLogger() Logger {
+ return binLogger
+}
+
// GetMethodLogger returns the methodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
// Each methodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
-func GetMethodLogger(methodName string) *MethodLogger {
+func GetMethodLogger(methodName string) MethodLogger {
if binLogger == nil {
return nil
}
- return binLogger.getMethodLogger(methodName)
+ return binLogger.GetMethodLogger(methodName)
}
func init() {
@@ -68,17 +75,29 @@ func init() {
binLogger = NewLoggerFromConfigString(configStr)
}
-type methodLoggerConfig struct {
+// MethodLoggerConfig contains the setting for logging behavior of a method
+// logger. Currently, it contains the max length of header and message.
+type MethodLoggerConfig struct {
// Max length of header and message.
- hdr, msg uint64
+ Header, Message uint64
+}
+
+// LoggerConfig contains the config for loggers to create method loggers.
+type LoggerConfig struct {
+ All *MethodLoggerConfig
+ Services map[string]*MethodLoggerConfig
+ Methods map[string]*MethodLoggerConfig
+
+ Blacklist map[string]struct{}
}
type logger struct {
- all *methodLoggerConfig
- services map[string]*methodLoggerConfig
- methods map[string]*methodLoggerConfig
+ config LoggerConfig
+}
- blacklist map[string]struct{}
+// NewLoggerFromConfig builds a logger with the given LoggerConfig.
+func NewLoggerFromConfig(config LoggerConfig) Logger {
+ return &logger{config: config}
}
// newEmptyLogger creates an empty logger. The map fields need to be filled in
@@ -88,57 +107,57 @@ func newEmptyLogger() *logger {
}
// Set method logger for "*".
-func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
- if l.all != nil {
+func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
+ if l.config.All != nil {
return fmt.Errorf("conflicting global rules found")
}
- l.all = ml
+ l.config.All = ml
return nil
}
// Set method logger for "service/*".
//
// New methodLogger with same service overrides the old one.
-func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
- if _, ok := l.services[service]; ok {
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
+ if _, ok := l.config.Services[service]; ok {
return fmt.Errorf("conflicting service rules for service %v found", service)
}
- if l.services == nil {
- l.services = make(map[string]*methodLoggerConfig)
+ if l.config.Services == nil {
+ l.config.Services = make(map[string]*MethodLoggerConfig)
}
- l.services[service] = ml
+ l.config.Services[service] = ml
return nil
}
// Set method logger for "service/method".
//
// New methodLogger with same method overrides the old one.
-func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
- if _, ok := l.blacklist[method]; ok {
+func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
+ if _, ok := l.config.Blacklist[method]; ok {
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
}
- if _, ok := l.methods[method]; ok {
+ if _, ok := l.config.Methods[method]; ok {
return fmt.Errorf("conflicting method rules for method %v found", method)
}
- if l.methods == nil {
- l.methods = make(map[string]*methodLoggerConfig)
+ if l.config.Methods == nil {
+ l.config.Methods = make(map[string]*MethodLoggerConfig)
}
- l.methods[method] = ml
+ l.config.Methods[method] = ml
return nil
}
// Set blacklist method for "-service/method".
func (l *logger) setBlacklist(method string) error {
- if _, ok := l.blacklist[method]; ok {
+ if _, ok := l.config.Blacklist[method]; ok {
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
}
- if _, ok := l.methods[method]; ok {
+ if _, ok := l.config.Methods[method]; ok {
return fmt.Errorf("conflicting method rules for method %v found", method)
}
- if l.blacklist == nil {
- l.blacklist = make(map[string]struct{})
+ if l.config.Blacklist == nil {
+ l.config.Blacklist = make(map[string]struct{})
}
- l.blacklist[method] = struct{}{}
+ l.config.Blacklist[method] = struct{}{}
return nil
}
@@ -148,23 +167,23 @@ func (l *logger) setBlacklist(method string) error {
//
// Each methodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
-func (l *logger) getMethodLogger(methodName string) *MethodLogger {
+func (l *logger) GetMethodLogger(methodName string) MethodLogger {
s, m, err := grpcutil.ParseMethod(methodName)
if err != nil {
grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
return nil
}
- if ml, ok := l.methods[s+"/"+m]; ok {
- return newMethodLogger(ml.hdr, ml.msg)
+ if ml, ok := l.config.Methods[s+"/"+m]; ok {
+ return newMethodLogger(ml.Header, ml.Message)
}
- if _, ok := l.blacklist[s+"/"+m]; ok {
+ if _, ok := l.config.Blacklist[s+"/"+m]; ok {
return nil
}
- if ml, ok := l.services[s]; ok {
- return newMethodLogger(ml.hdr, ml.msg)
+ if ml, ok := l.config.Services[s]; ok {
+ return newMethodLogger(ml.Header, ml.Message)
}
- if l.all == nil {
+ if l.config.All == nil {
return nil
}
- return newMethodLogger(l.all.hdr, l.all.msg)
+ return newMethodLogger(l.config.All.Header, l.config.All.Message)
}
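With MethodLoggerConfig, LoggerConfig, and NewLoggerFromConfig exported, a logger can be assembled programmatically instead of from the env config string. A sketch of the calling convention; note this package is internal to grpc, so only code inside the module can import it, and the limits and method name here are arbitrary examples:

package binarylogexample

import "google.golang.org/grpc/internal/binarylog"

func installLogger() {
	cfg := binarylog.LoggerConfig{
		// Log every method, truncating headers to 1 KiB and messages to 4 KiB.
		All: &binarylog.MethodLoggerConfig{Header: 1024, Message: 4096},
		// Never log this one method.
		Blacklist: map[string]struct{}{"secret.Service/Token": {}},
	}
	binarylog.SetLogger(binarylog.NewLoggerFromConfig(cfg))
}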
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
index d8f4e7602..ab589a76b 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
if err != nil {
return fmt.Errorf("invalid config: %q, %v", config, err)
}
- if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+ if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
return nil
@@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
}
if m == "*" {
- if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+ if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
} else {
- if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+ if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
}
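These hunks only swap the constructed type; the config-string grammar itself is unchanged. For orientation, a hedged example of that grammar, assuming the usual GRPC_BINARY_LOG_FILTER variable (service and method names are made up):

package main

import "os"

func main() {
	// "*{h:256;m:512}" default rule: cap headers at 256 bytes, messages at 512
	// "Foo/*"          log all methods of service Foo in full
	// "-Foo/Secret"    blacklist one method
	os.Setenv("GRPC_BINARY_LOG_FILTER", "*{h:256;m:512},Foo/*,-Foo/Secret")
}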
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index 0cdb41831..24df0a1a0 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -48,7 +48,11 @@ func (g *callIDGenerator) reset() {
var idGen callIDGenerator
// MethodLogger is the sub-logger for each method.
-type MethodLogger struct {
+type MethodLogger interface {
+ Log(LogEntryConfig)
+}
+
+type methodLogger struct {
headerMaxLen, messageMaxLen uint64
callID uint64
@@ -57,8 +61,8 @@ type MethodLogger struct {
sink Sink // TODO(blog): make this plugable.
}
-func newMethodLogger(h, m uint64) *MethodLogger {
- return &MethodLogger{
+func newMethodLogger(h, m uint64) *methodLogger {
+ return &methodLogger{
headerMaxLen: h,
messageMaxLen: m,
@@ -69,8 +73,10 @@ func newMethodLogger(h, m uint64) *MethodLogger {
}
}
-// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *MethodLogger) Log(c LogEntryConfig) {
+// Build is an internal-only method for building the proto message out of the
+// input event. It's made public to enable other libraries to reuse as much
+// logic in methodLogger as possible.
+func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
m := c.toProto()
timestamp, _ := ptypes.TimestampProto(time.Now())
m.Timestamp = timestamp
@@ -85,11 +91,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) {
case *pb.GrpcLogEntry_Message:
m.PayloadTruncated = ml.truncateMessage(pay.Message)
}
+ return m
+}
- ml.sink.Write(m)
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *methodLogger) Log(c LogEntryConfig) {
+ ml.sink.Write(ml.Build(c))
}
-func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
+func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
if ml.headerMaxLen == maxUInt {
return false
}
@@ -119,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
return truncated
}
-func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
+func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
if ml.messageMaxLen == maxUInt {
return false
}
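Turning MethodLogger into a single-method interface means code inside the module can substitute its own implementation, e.g. to capture entries in tests instead of writing to a sink. A hedged sketch (the capture type is mine, not part of the patch):

package binarylogexample

import "google.golang.org/grpc/internal/binarylog"

// captureLogger satisfies the new MethodLogger interface by recording entries
// rather than writing them to a sink.
type captureLogger struct {
	entries []binarylog.LogEntryConfig
}

func (c *captureLogger) Log(e binarylog.LogEntryConfig) {
	c.entries = append(c.entries, e)
}

// Compile-time check that the interface is satisfied.
var _ binarylog.MethodLogger = (*captureLogger)(nil)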
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index cd1807543..777cbcd79 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -24,6 +24,8 @@
package channelz
import (
+ "context"
+ "errors"
"fmt"
"sort"
"sync"
@@ -49,7 +51,8 @@ var (
// TurnOn turns on channelz data collection.
func TurnOn() {
if !IsOn() {
- NewChannelzStorage()
+ db.set(newChannelMap())
+ idGen.reset()
atomic.StoreInt32(&curState, 1)
}
}
@@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap {
return d.DB
}
-// NewChannelzStorage initializes channelz data storage and id generator.
+// NewChannelzStorageForTesting initializes channelz data storage and id
+// generator for testing purposes.
//
-// This function returns a cleanup function to wait for all channelz state to be reset by the
-// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
-// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen
-// to remove some entity just register by the new test, since the id space is the same.
-//
-// Note: This function is exported for testing purpose only. User should not call
-// it in most cases.
-func NewChannelzStorage() (cleanup func() error) {
- db.set(&channelMap{
- topLevelChannels: make(map[int64]struct{}),
- channels: make(map[int64]*channel),
- listenSockets: make(map[int64]*listenSocket),
- normalSockets: make(map[int64]*normalSocket),
- servers: make(map[int64]*server),
- subChannels: make(map[int64]*subChannel),
- })
+// Returns a cleanup function to be invoked by the test, which waits for up to
+// 10s for all channelz state to be reset by the grpc goroutines when those
+// entities get closed. This cleanup function helps with ensuring that tests
+// don't mess up each other.
+func NewChannelzStorageForTesting() (cleanup func() error) {
+ db.set(newChannelMap())
idGen.reset()
+
return func() error {
- var err error
cm := db.get()
if cm == nil {
return nil
}
- for i := 0; i < 1000; i++ {
- cm.mu.Lock()
- if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
- cm.mu.Unlock()
- // all things stored in the channelz map have been cleared.
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ ticker := time.NewTicker(10 * time.Millisecond)
+ defer ticker.Stop()
+ for {
+ cm.mu.RLock()
+ topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)
+ cm.mu.RUnlock()
+
+ if err := ctx.Err(); err != nil {
+ return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets)
+ }
+ if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 {
return nil
}
- cm.mu.Unlock()
- time.Sleep(10 * time.Millisecond)
+ <-ticker.C
}
-
- cm.mu.Lock()
- err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
- cm.mu.Unlock()
- return err
}
}
@@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric {
return db.get().GetServer(id)
}
-// RegisterChannel registers the given channel c in channelz database with ref
-// as its reference name, and add it to the child list of its parent (identified
-// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
-// assigned to this channel.
-func RegisterChannel(c Channel, pid int64, ref string) int64 {
+// RegisterChannel registers the given channel c in the channelz database with
+// ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). pid == nil means no parent.
+//
+// Returns a unique channelz identifier assigned to this channel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
id := idGen.genID()
+ var parent int64
+ isTopChannel := true
+ if pid != nil {
+ isTopChannel = false
+ parent = pid.Int()
+ }
+
+ if !IsOn() {
+ return newIdentifer(RefChannel, id, pid)
+ }
+
cn := &channel{
refName: ref,
c: c,
subChans: make(map[int64]string),
nestedChans: make(map[int64]string),
id: id,
- pid: pid,
+ pid: parent,
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
}
- if pid == 0 {
- db.get().addChannel(id, cn, true, pid)
- } else {
- db.get().addChannel(id, cn, false, pid)
- }
- return id
+ db.get().addChannel(id, cn, isTopChannel, parent)
+ return newIdentifer(RefChannel, id, pid)
}
-// RegisterSubChannel registers the given channel c in channelz database with ref
-// as its reference name, and add it to the child list of its parent (identified
-// by pid). It returns the unique channelz tracking id assigned to this subchannel.
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
- if pid == 0 {
- logger.Error("a SubChannel's parent id cannot be 0")
- return 0
+// RegisterSubChannel registers the given subChannel c in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid).
+//
+// Returns a unique channelz identifier assigned to this subChannel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
+ if pid == nil {
+ return nil, errors.New("a SubChannel's parent id cannot be nil")
}
id := idGen.genID()
+ if !IsOn() {
+ return newIdentifer(RefSubChannel, id, pid), nil
+ }
+
sc := &subChannel{
refName: ref,
c: c,
sockets: make(map[int64]string),
id: id,
- pid: pid,
+ pid: pid.Int(),
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
}
- db.get().addSubChannel(id, sc, pid)
- return id
+ db.get().addSubChannel(id, sc, pid.Int())
+ return newIdentifer(RefSubChannel, id, pid), nil
}
// RegisterServer registers the given server s in channelz database. It returns
// the unique channelz tracking id assigned to this server.
-func RegisterServer(s Server, ref string) int64 {
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterServer(s Server, ref string) *Identifier {
id := idGen.genID()
+ if !IsOn() {
+ return newIdentifer(RefServer, id, nil)
+ }
+
svr := &server{
refName: ref,
s: s,
@@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 {
id: id,
}
db.get().addServer(id, svr)
- return id
+ return newIdentifer(RefServer, id, nil)
}
// RegisterListenSocket registers the given listen socket s in channelz database
// with ref as its reference name, and add it to the child list of its parent
// (identified by pid). It returns the unique channelz tracking id assigned to
// this listen socket.
-func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
- if pid == 0 {
- logger.Error("a ListenSocket's parent id cannot be 0")
- return 0
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+ if pid == nil {
+ return nil, errors.New("a ListenSocket's parent id cannot be nil")
}
id := idGen.genID()
- ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
- db.get().addListenSocket(id, ls, pid)
- return id
+ if !IsOn() {
+ return newIdentifer(RefListenSocket, id, pid), nil
+ }
+
+ ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+ db.get().addListenSocket(id, ls, pid.Int())
+ return newIdentifer(RefListenSocket, id, pid), nil
}
// RegisterNormalSocket registers the given normal socket s in channelz database
-// with ref as its reference name, and add it to the child list of its parent
+// with ref as its reference name, and adds it to the child list of its parent
// (identified by pid). It returns the unique channelz tracking id assigned to
// this normal socket.
-func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
- if pid == 0 {
- logger.Error("a NormalSocket's parent id cannot be 0")
- return 0
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+ if pid == nil {
+ return nil, errors.New("a NormalSocket's parent id cannot be nil")
}
id := idGen.genID()
- ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
- db.get().addNormalSocket(id, ns, pid)
- return id
+ if !IsOn() {
+ return newIdentifer(RefNormalSocket, id, pid), nil
+ }
+
+ ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+ db.get().addNormalSocket(id, ns, pid.Int())
+ return newIdentifer(RefNormalSocket, id, pid), nil
}
// RemoveEntry removes an entry with unique channelz tracking id to be id from
// channelz database.
-func RemoveEntry(id int64) {
- db.get().removeEntry(id)
+//
+// If channelz is not turned ON, this function is a no-op.
+func RemoveEntry(id *Identifier) {
+ if !IsOn() {
+ return
+ }
+ db.get().removeEntry(id.Int())
}
-// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
-// to the channel trace.
-// The Parent field is optional. It is used for event that will be recorded in the entity's parent
-// trace also.
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
+// the event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
type TraceEventDesc struct {
Desc string
Severity Severity
Parent *TraceEventDesc
}
-// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
- for d := desc; d != nil; d = d.Parent {
- switch d.Severity {
- case CtUnknown, CtInfo:
- l.InfoDepth(depth+1, d.Desc)
- case CtWarning:
- l.WarningDepth(depth+1, d.Desc)
- case CtError:
- l.ErrorDepth(depth+1, d.Desc)
- }
+// AddTraceEvent adds trace related to the entity with specified id, using the
+// provided TraceEventDesc.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
+ // Log only the trace description associated with the bottom-most entity.
+ switch desc.Severity {
+ case CtUnknown, CtInfo:
+ l.InfoDepth(depth+1, withParens(id)+desc.Desc)
+ case CtWarning:
+ l.WarningDepth(depth+1, withParens(id)+desc.Desc)
+ case CtError:
+ l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
}
+
if getMaxTraceEntry() == 0 {
return
}
- db.get().traceEvent(id, desc)
+ if IsOn() {
+ db.get().traceEvent(id.Int(), desc)
+ }
}
// channelMap is the storage data structure for channelz.
@@ -326,6 +367,17 @@ type channelMap struct {
normalSockets map[int64]*normalSocket
}
+func newChannelMap() *channelMap {
+ return &channelMap{
+ topLevelChannels: make(map[int64]struct{}),
+ channels: make(map[int64]*channel),
+ listenSockets: make(map[int64]*listenSocket),
+ normalSockets: make(map[int64]*normalSocket),
+ servers: make(map[int64]*server),
+ subChannels: make(map[int64]*subChannel),
+ }
+}
+
func (c *channelMap) addServer(id int64, s *server) {
c.mu.Lock()
s.cm = c
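NewChannelzStorageForTesting keeps the cleanup-function contract of its predecessor, now with an explicit 10s deadline and a ticker instead of a bounded sleep loop. The intended test-side pattern, sketched (the test body is illustrative; the package is internal to grpc):

package channelzexample

import (
	"testing"

	"google.golang.org/grpc/internal/channelz"
)

func TestWithChannelz(t *testing.T) {
	cleanup := channelz.NewChannelzStorageForTesting()
	defer func() {
		// Fails the test if channelz entities linger past the 10s deadline.
		if err := cleanup(); err != nil {
			t.Error(err)
		}
	}()
	// ... exercise code that registers channels, servers, and sockets ...
}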
diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go
new file mode 100644
index 000000000..c9a27acd3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/id.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import "fmt"
+
+// Identifier is an opaque identifier which uniquely identifies an entity in the
+// channelz database.
+type Identifier struct {
+ typ RefChannelType
+ id int64
+ str string
+ pid *Identifier
+}
+
+// Type returns the entity type corresponding to id.
+func (id *Identifier) Type() RefChannelType {
+ return id.typ
+}
+
+// Int returns the integer identifier corresponding to id.
+func (id *Identifier) Int() int64 {
+ return id.id
+}
+
+// String returns a string representation of the entity corresponding to id.
+//
+// This includes some information about the parent as well. Examples:
+// Top-level channel: [Channel #channel-number]
+// Nested channel: [Channel #parent-channel-number Channel #channel-number]
+// Sub channel: [Channel #parent-channel SubChannel #subchannel-number]
+func (id *Identifier) String() string {
+ return id.str
+}
+
+// Equal returns true if other is the same as id.
+func (id *Identifier) Equal(other *Identifier) bool {
+ if (id != nil) != (other != nil) {
+ return false
+ }
+ if id == nil && other == nil {
+ return true
+ }
+ return id.typ == other.typ && id.id == other.id && id.pid == other.pid
+}
+
+// NewIdentifierForTesting returns a new opaque identifier to be used only for
+// testing purposes.
+func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier {
+ return newIdentifer(typ, id, pid)
+}
+
+func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier {
+ str := fmt.Sprintf("%s #%d", typ, id)
+ if pid != nil {
+ str = fmt.Sprintf("%s %s", pid, str)
+ }
+ return &Identifier{typ: typ, id: id, str: str, pid: pid}
+}
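newIdentifer produces the parent-prefixed strings the String doc comment describes. A standalone replica of just the formatting rule, independent of the channelz types (names are mine):

package main

import "fmt"

// identString mirrors newIdentifer: each identifier renders as "Type #id",
// prefixed by its parent's rendering when one exists.
func identString(typ string, id int64, parent string) string {
	s := fmt.Sprintf("%s #%d", typ, id)
	if parent != "" {
		s = parent + " " + s
	}
	return s
}

func main() {
	top := identString("Channel", 1, "")
	sub := identString("SubChannel", 2, top)
	fmt.Println("[" + sub + "]") // [Channel #1 SubChannel #2]
}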
diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go
index b0013f9c8..8e13a3d2c 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/logging.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go
@@ -26,77 +26,54 @@ import (
var logger = grpclog.Component("channelz")
+func withParens(id *Identifier) string {
+ return "[" + id.String() + "] "
+}
+
// Info logs and adds a trace event if channelz is on.
-func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
- if IsOn() {
- AddTraceEvent(l, id, 1, &TraceEventDesc{
- Desc: fmt.Sprint(args...),
- Severity: CtInfo,
- })
- } else {
- l.InfoDepth(1, args...)
- }
+func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
+ Desc: fmt.Sprint(args...),
+ Severity: CtInfo,
+ })
}
// Infof logs and adds a trace event if channelz is on.
-func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
- msg := fmt.Sprintf(format, args...)
- if IsOn() {
- AddTraceEvent(l, id, 1, &TraceEventDesc{
- Desc: msg,
- Severity: CtInfo,
- })
- } else {
- l.InfoDepth(1, msg)
- }
+func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
+ Desc: fmt.Sprintf(format, args...),
+ Severity: CtInfo,
+ })
}
// Warning logs and adds a trace event if channelz is on.
-func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
- if IsOn() {
- AddTraceEvent(l, id, 1, &TraceEventDesc{
- Desc: fmt.Sprint(args...),
- Severity: CtWarning,
- })
- } else {
- l.WarningDepth(1, args...)
- }
+func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
+ Desc: fmt.Sprint(args...),
+ Severity: CtWarning,
+ })
}
// Warningf logs and adds a trace event if channelz is on.
-func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
- msg := fmt.Sprintf(format, args...)
- if IsOn() {
- AddTraceEvent(l, id, 1, &TraceEventDesc{
- Desc: msg,
- Severity: CtWarning,
- })
- } else {
- l.WarningDepth(1, msg)
- }
+func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
+ Desc: fmt.Sprintf(format, args...),
+ Severity: CtWarning,
+ })
}
// Error logs and adds a trace event if channelz is on.
-func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
- if IsOn() {
- AddTraceEvent(l, id, 1, &TraceEventDesc{
- Desc: fmt.Sprint(args...),
- Severity: CtError,
- })
- } else {
- l.ErrorDepth(1, args...)
- }
+func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
+ Desc: fmt.Sprint(args...),
+ Severity: CtError,
+ })
}
// Errorf logs and adds a trace event if channelz is on.
-func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
- msg := fmt.Sprintf(format, args...)
- if IsOn() {
- AddTraceEvent(l, id, 1, &TraceEventDesc{
- Desc: msg,
- Severity: CtError,
- })
- } else {
- l.ErrorDepth(1, msg)
- }
+func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+ AddTraceEvent(l, id, 1, &TraceEventDesc{
+ Desc: fmt.Sprintf(format, args...),
+ Severity: CtError,
+ })
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
index 3c595d154..ad0ce4dab 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/types.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
@@ -686,12 +686,33 @@ const (
type RefChannelType int
const (
+ // RefUnknown indicates an unknown entity type, the zero value for this type.
+ RefUnknown RefChannelType = iota
// RefChannel indicates the referenced entity is a Channel.
- RefChannel RefChannelType = iota
+ RefChannel
// RefSubChannel indicates the referenced entity is a SubChannel.
RefSubChannel
+ // RefServer indicates the referenced entity is a Server.
+ RefServer
+ // RefListenSocket indicates the referenced entity is a ListenSocket.
+ RefListenSocket
+ // RefNormalSocket indicates the referenced entity is a NormalSocket.
+ RefNormalSocket
)
+var refChannelTypeToString = map[RefChannelType]string{
+ RefUnknown: "Unknown",
+ RefChannel: "Channel",
+ RefSubChannel: "SubChannel",
+ RefServer: "Server",
+ RefListenSocket: "ListenSocket",
+ RefNormalSocket: "NormalSocket",
+}
+
+func (r RefChannelType) String() string {
+ return refChannelTypeToString[r]
+}
+
func (c *channelTrace) dumpData() *ChannelTrace {
c.mu.Lock()
ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 9bad03cec..7d996e51b 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -26,13 +26,13 @@ import (
const (
// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
// Do not use this and read from env directly. Its value is read and kept in
- // variable BootstrapFileName.
+ // variable XDSBootstrapFileName.
//
// When both bootstrap FileName and FileContent are set, FileName is used.
XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
- // XDSBootstrapFileContentEnv is the env variable to set bootstrapp file
+ // XDSBootstrapFileContentEnv is the env variable to set bootstrap file
// content. Do not use this and read from env directly. Its value is read
- // and kept in variable BootstrapFileName.
+ // and kept in variable XDSBootstrapFileContent.
//
// When both bootstrap FileName and FileContent are set, FileName is used.
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
@@ -41,6 +41,7 @@ const (
clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC"
+ outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
@@ -82,7 +83,10 @@ var (
// which can be disabled by setting the environment variable
// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
-
+ // XDSOutlierDetection indicates whether outlier detection support is
+ // enabled, which can be enabled by setting the environment variable
+ // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true".
+ XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true")
// XDSFederation indicates whether federation support is enabled.
XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
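Note the defaults point in opposite directions: RBAC is enabled unless explicitly set to "false", while outlier detection is disabled unless explicitly set to "true". The two parsing styles, restated in a standalone program:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Opt-out style (defaults to true), as used for RBAC:
	rbac := !strings.EqualFold(os.Getenv("GRPC_XDS_EXPERIMENTAL_RBAC"), "false")
	// Opt-in style (defaults to false), as used for outlier detection:
	od := strings.EqualFold(os.Getenv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"), "true")
	fmt.Println(rbac, od) // with neither variable set: true false
}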
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 1b596bf35..6d355b0b0 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -38,11 +38,10 @@ var (
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second
- // ParseServiceConfigForTesting is for creating a fake
- // ClientConn for resolver testing only
- ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
+ // ParseServiceConfig parses a JSON representation of the service config.
+ ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
// EqualServiceConfigForTesting is for testing service config generation and
- // parsing. Both a and b should be returned by ParseServiceConfigForTesting.
+ // parsing. Both a and b should be returned by ParseServiceConfig.
// This function compares the config without rawJSON stripped, in case
// there's a difference in white space.
EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
@@ -86,3 +85,9 @@ const (
// that supports backend returned by grpclb balancer.
CredsBundleModeBackendFromBalancer = "backend-from-balancer"
)
+
+// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
+//
+// It currently has an experimental suffix which would be removed once
+// end-to-end testing of the policy is completed.
+const RLSLoadBalancingPolicyName = "rls_experimental"
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
index b8733dbf3..b2980f8ac 100644
--- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -22,6 +22,9 @@
package metadata
import (
+ "fmt"
+ "strings"
+
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
)
@@ -72,3 +75,46 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address {
addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
return addr
}
+
+// Validate returns an error if the input md contains invalid keys or values.
+//
+// If the header is not a pseudo-header, the following items are checked:
+// - header names must contain one or more characters from this set [0-9 a-z _ - .].
+// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
+// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
+func Validate(md metadata.MD) error {
+ for k, vals := range md {
+ // pseudo-header will be ignored
+ if k[0] == ':' {
+ continue
+ }
+ // check key; iterate by index to avoid the rune conversion of a for-range loop
+ for i := 0; i < len(k); i++ {
+ r := k[i]
+ if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+ return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
+ }
+ }
+ if strings.HasSuffix(k, "-bin") {
+ continue
+ }
+ // check value
+ for _, val := range vals {
+ if hasNotPrintable(val) {
+ return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
+ }
+ }
+ }
+ return nil
+}
+
+// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E.
+func hasNotPrintable(msg string) bool {
+ // iterate by index to avoid the rune conversion of a for-range loop
+ for i := 0; i < len(msg); i++ {
+ if msg[i] < 0x20 || msg[i] > 0x7E {
+ return true
+ }
+ }
+ return false
+}
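Concretely, the rules Validate enforces, sketched as a usage example; the internal metadata package only compiles inside the grpc module, and the keys and values here are invented:

package metadataexample

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
)

func examples() {
	// Lowercase key, printable ASCII value: passes.
	fmt.Println(imetadata.Validate(metadata.MD{"content-type": {"application/grpc"}}))
	// Uppercase letter in the key: rejected.
	fmt.Println(imetadata.Validate(metadata.MD{"Upper-Case": {"x"}}))
	// Non-printable byte in the value: rejected.
	fmt.Println(imetadata.Validate(metadata.MD{"note": {"caf\x00"}}))
	// "-bin" suffix: value bytes are not checked.
	fmt.Println(imetadata.Validate(metadata.MD{"trace-bin": {"\x00\xff"}}))
	// Pseudo-headers are skipped entirely.
	fmt.Println(imetadata.Validate(metadata.MD{":authority": {"example.com"}}))
}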
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
new file mode 100644
index 000000000..0177af4b5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
@@ -0,0 +1,82 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pretty defines helper functions to pretty-print structs for logging.
+package pretty
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "github.com/golang/protobuf/jsonpb"
+ protov1 "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/encoding/protojson"
+ protov2 "google.golang.org/protobuf/proto"
+)
+
+const jsonIndent = " "
+
+// ToJSON marshals the input into a json string.
+//
+// If marshal fails, it falls back to fmt.Sprintf("%+v").
+func ToJSON(e interface{}) string {
+ switch ee := e.(type) {
+ case protov1.Message:
+ mm := jsonpb.Marshaler{Indent: jsonIndent}
+ ret, err := mm.MarshalToString(ee)
+ if err != nil {
+ // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+ // messages are not imported, and this will fail because the message
+ // is not found.
+ return fmt.Sprintf("%+v", ee)
+ }
+ return ret
+ case protov2.Message:
+ mm := protojson.MarshalOptions{
+ Multiline: true,
+ Indent: jsonIndent,
+ }
+ ret, err := mm.Marshal(ee)
+ if err != nil {
+ // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+ // messages are not imported, and this will fail because the message
+ // is not found.
+ return fmt.Sprintf("%+v", ee)
+ }
+ return string(ret)
+ default:
+ ret, err := json.MarshalIndent(ee, "", jsonIndent)
+ if err != nil {
+ return fmt.Sprintf("%+v", ee)
+ }
+ return string(ret)
+ }
+}
+
+// FormatJSON formats the input json bytes with indentation.
+//
+// If Indent fails, it returns the unchanged input as string.
+func FormatJSON(b []byte) string {
+ var out bytes.Buffer
+ err := json.Indent(&out, b, "", jsonIndent)
+ if err != nil {
+ return string(b)
+ }
+ return out.String()
+}
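FormatJSON's fallback behavior is worth noting: on malformed input it returns the bytes unchanged rather than an error. A standalone replica of that rule using only the standard library:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// indent mirrors pretty.FormatJSON: indent when possible, otherwise return
// the input as-is instead of failing.
func indent(b []byte) string {
	var out bytes.Buffer
	if err := json.Indent(&out, b, "", "  "); err != nil {
		return string(b)
	}
	return out.String()
}

func main() {
	fmt.Println(indent([]byte(`{"a":1,"b":[2,3]}`))) // pretty-printed
	fmt.Println(indent([]byte("not json")))          // returned unchanged
}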
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index 8394d252d..244f4b081 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -137,6 +137,7 @@ type earlyAbortStream struct {
streamID uint32
contentSubtype string
status *status.Status
+ rst bool
}
func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
@@ -786,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
return err
}
+ if eas.rst {
+ if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
+ return err
+ }
+ }
return nil
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index f0c72d337..24ca59084 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -132,7 +132,7 @@ type http2Client struct {
kpDormant bool
// Fields below are for channelz metric collection.
- channelzID int64 // channelz unique identification number
+ channelzID *channelz.Identifier
czData *channelzData
onGoAway func(GoAwayReason)
@@ -351,8 +351,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
t.statsHandler.HandleConn(t.ctx, connBegin)
}
- if channelz.IsOn() {
- t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
+ t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
+ if err != nil {
+ return nil, err
}
if t.keepaliveEnabled {
t.kpDormancyCond = sync.NewCond(&t.mu)
@@ -630,8 +631,8 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
// the wire. However, there are two notable exceptions:
//
// 1. If the stream headers violate the max header list size allowed by the
-// server. In this case there is no reason to retry at all, as it is
-// assumed the RPC would continue to fail on subsequent attempts.
+// server. It's possible this could succeed on another transport, even if
+// it's unlikely, but do not transparently retry.
// 2. If the credentials errored when requesting their headers. In this case,
// it's possible a retry can fix the problem, but indefinitely transparently
// retrying is not appropriate as it is likely the credentials, if they can
@@ -639,8 +640,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
type NewStreamError struct {
Err error
- DoNotRetry bool
- DoNotTransparentRetry bool
+ AllowTransparentRetry bool
}
func (e NewStreamError) Error() string {
@@ -649,11 +649,11 @@ func (e NewStreamError) Error() string {
// NewStream creates a stream and registers it into the transport as "active"
// streams. All non-nil errors returned will be *NewStreamError.
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
ctx = peer.NewContext(ctx, t.getPeer())
headerFields, err := t.createHeaderFields(ctx, callHdr)
if err != nil {
- return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true}
+ return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
}
s := t.newStream(ctx, callHdr)
cleanup := func(err error) {
@@ -753,13 +753,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
return true
}, hdr)
if err != nil {
- return nil, &NewStreamError{Err: err}
+ // Connection closed.
+ return nil, &NewStreamError{Err: err, AllowTransparentRetry: true}
}
if success {
break
}
if hdrListSizeErr != nil {
- return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true}
+ return nil, &NewStreamError{Err: hdrListSizeErr}
}
firstTry = false
select {
@@ -767,9 +768,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
case <-ctx.Done():
return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
case <-t.goAway:
- return nil, &NewStreamError{Err: errStreamDrain}
+ return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true}
case <-t.ctx.Done():
- return nil, &NewStreamError{Err: ErrConnClosing}
+ return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
}
}
if t.statsHandler != nil {
@@ -898,9 +899,7 @@ func (t *http2Client) Close(err error) {
t.controlBuf.finish()
t.cancel()
t.conn.Close()
- if channelz.IsOn() {
- channelz.RemoveEntry(t.channelzID)
- }
+ channelz.RemoveEntry(t.channelzID)
// Append info about previous goaways if there were any, since this may be important
// for understanding the root cause for this connection to be closed.
_, goAwayDebugMessage := t.GetGoAwayReason()
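
The retry contract is now expressed positively: AllowTransparentRetry is set only when no application data could have reached the wire. A self-contained mirror of the struct and the check the retry code performs (the real type lives in the internal transport package):

package main

import (
	"errors"
	"fmt"
)

// NewStreamError mirrors the post-change struct: the two negative flags
// (DoNotRetry, DoNotTransparentRetry) collapse into one positive one.
type NewStreamError struct {
	Err                   error
	AllowTransparentRetry bool
}

func (e *NewStreamError) Error() string { return e.Err.Error() }

func main() {
	errStreamDrain := errors.New("the connection is draining")
	err := error(&NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true})

	var nse *NewStreamError
	if errors.As(err, &nse) && nse.AllowTransparentRetry {
		fmt.Println("no data hit the wire; retry on a new transport:", nse.Err)
	}
}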
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 2c6eaf0e5..45d7bd145 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -21,7 +21,6 @@ package transport
import (
"bytes"
"context"
- "errors"
"fmt"
"io"
"math"
@@ -36,6 +35,7 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
@@ -52,10 +52,10 @@ import (
var (
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
// the stream's state.
- ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
+ ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times")
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
// than the limit set by peer.
- ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
+ ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer")
)
// serverConnectionCounter counts the number of connections a server has seen
@@ -117,7 +117,7 @@ type http2Server struct {
idle time.Time
// Fields below are for channelz metric collection.
- channelzID int64 // channelz unique identification number
+ channelzID *channelz.Identifier
czData *channelzData
bufferPool *bufferPool
@@ -231,6 +231,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
if kp.Timeout == 0 {
kp.Timeout = defaultServerKeepaliveTimeout
}
+ if kp.Time != infinity {
+ if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+ return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
+ }
+ }
kep := config.KeepalivePolicy
if kep.MinTime == 0 {
kep.MinTime = defaultKeepalivePolicyMinTime
@@ -275,12 +280,12 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
connBegin := &stats.ConnBegin{}
t.stats.HandleConn(t.ctx, connBegin)
}
- if channelz.IsOn() {
- t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
+ t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
+ if err != nil {
+ return nil, err
}
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
-
t.framer.writer.Flush()
defer func() {
@@ -443,6 +448,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
streamID: streamID,
contentSubtype: s.contentSubtype,
status: status.New(codes.Internal, errMsg),
+ rst: !frame.StreamEnded(),
})
return false
}
@@ -516,14 +522,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
if httpMethod != http.MethodPost {
t.mu.Unlock()
+ errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
if logger.V(logLevel) {
- logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
+ logger.Infof("transport: %v", errMsg)
}
- t.controlBuf.put(&cleanupStream{
- streamID: streamID,
- rst: true,
- rstCode: http2.ErrCodeProtocol,
- onWrite: func() {},
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: 405,
+ streamID: streamID,
+ contentSubtype: s.contentSubtype,
+ status: status.New(codes.Internal, errMsg),
+ rst: !frame.StreamEnded(),
})
s.cancel()
return false
@@ -544,6 +552,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
streamID: s.id,
contentSubtype: s.contentSubtype,
status: stat,
+ rst: !frame.StreamEnded(),
})
return false
}
@@ -925,11 +934,25 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
return true
}
+func (t *http2Server) streamContextErr(s *Stream) error {
+ select {
+ case <-t.done:
+ return ErrConnClosing
+ default:
+ }
+ return ContextErr(s.ctx.Err())
+}
+
// WriteHeader sends the header metadata md back to the client.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
- if s.updateHeaderSent() || s.getState() == streamDone {
+ if s.updateHeaderSent() {
return ErrIllegalHeaderWrite
}
+
+ if s.getState() == streamDone {
+ return t.streamContextErr(s)
+ }
+
s.hdrMu.Lock()
if md.Len() > 0 {
if s.header.Len() > 0 {
@@ -940,7 +963,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
}
if err := t.writeHeaderLocked(s); err != nil {
s.hdrMu.Unlock()
- return err
+ return status.Convert(err).Err()
}
s.hdrMu.Unlock()
return nil
@@ -1056,23 +1079,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
if !s.isHeaderSent() { // Headers haven't been written yet.
if err := t.WriteHeader(s, nil); err != nil {
- if _, ok := err.(ConnectionError); ok {
- return err
- }
- // TODO(mmukhi, dfawley): Make sure this is the right code to return.
- return status.Errorf(codes.Internal, "transport: %v", err)
+ return err
}
} else {
// Writing headers checks for this condition.
if s.getState() == streamDone {
- // TODO(mmukhi, dfawley): Should the server write also return io.EOF?
- s.cancel()
- select {
- case <-t.done:
- return ErrConnClosing
- default:
- }
- return ContextErr(s.ctx.Err())
+ return t.streamContextErr(s)
}
}
df := &dataFrame{
@@ -1082,12 +1094,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
onEachWrite: t.setResetPingStrikes,
}
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
- select {
- case <-t.done:
- return ErrConnClosing
- default:
- }
- return ContextErr(s.ctx.Err())
+ return t.streamContextErr(s)
}
return t.controlBuf.put(df)
}
@@ -1210,9 +1217,7 @@ func (t *http2Server) Close() {
if err := t.conn.Close(); err != nil && logger.V(logLevel) {
logger.Infof("transport: error closing conn during Close: %v", err)
}
- if channelz.IsOn() {
- channelz.RemoveEntry(t.channelzID)
- }
+ channelz.RemoveEntry(t.channelzID)
// Cancel all active streams.
for _, s := range streams {
s.cancel()
@@ -1225,10 +1230,6 @@ func (t *http2Server) Close() {
// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
- // In case stream sending and receiving are invoked in separate
- // goroutines (e.g., bi-directional streaming), cancel needs to be
- // called to interrupt the potential blocking on other goroutines.
- s.cancel()
t.mu.Lock()
if _, ok := t.activeStreams[s.id]; ok {
@@ -1250,6 +1251,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+ // In case stream sending and receiving are invoked in separate
+ // goroutines (e.g., bi-directional streaming), cancel needs to be
+ // called to interrupt the potential blocking on other goroutines.
+ s.cancel()
+
oldState := s.swapState(streamDone)
if oldState == streamDone {
// If the stream was already done, return.
@@ -1269,6 +1275,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h
// closeStream clears the footprint of a stream when the stream is not needed any more.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+ // In case stream sending and receiving are invoked in separate
+ // goroutines (e.g., bi-directional streaming), cancel needs to be
+ // called to interrupt the potential blocking on other goroutines.
+ s.cancel()
+
s.swapState(streamDone)
t.deleteStream(s, eosReceived)
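
Server keepalive configuration is unchanged at the API level; the difference is that kp.Timeout now also becomes TCP_USER_TIMEOUT on each accepted connection when keepalive pings are enabled. A usage sketch (address and durations are illustrative):

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		Time:    2 * time.Minute,  // ping after 2m of inactivity
		Timeout: 20 * time.Second, // now also applied as TCP_USER_TIMEOUT
	}))
	log.Fatal(srv.Serve(lis))
}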
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index d3bf65b2b..a9ce717f1 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -34,6 +34,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
@@ -529,7 +530,7 @@ type ServerConfig struct {
InitialConnWindowSize int32
WriteBufferSize int
ReadBufferSize int
- ChannelzParentID int64
+ ChannelzParentID *channelz.Identifier
MaxHeaderListSize *uint32
HeaderTableSize *uint32
}
@@ -563,7 +564,7 @@ type ConnectOptions struct {
// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
ReadBufferSize int
// ChannelzParentID sets the addrConn id which initiated the creation of this client transport.
- ChannelzParentID int64
+ ChannelzParentID *channelz.Identifier
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
MaxHeaderListSize *uint32
// UseProxy specifies if a proxy should be used.
@@ -741,6 +742,12 @@ func (e ConnectionError) Origin() error {
return e.err
}
+// Unwrap returns the original error of this connection error or nil when the
+// origin is nil.
+func (e ConnectionError) Unwrap() error {
+ return e.err
+}
+
var (
// ErrConnClosing indicates that the transport is closing.
ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
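
Unwrap makes ConnectionError cooperate with errors.Is and errors.As. Since the type is internal, a self-contained mirror shows the effect:

package main

import (
	"errors"
	"fmt"
	"os"
)

// connectionError mirrors the internal type: Unwrap exposes the wrapped
// cause so errors.Is can see through it.
type connectionError struct {
	desc string
	err  error
}

func (e connectionError) Error() string { return "connection error: desc = " + e.desc }
func (e connectionError) Unwrap() error { return e.err }

func main() {
	err := error(connectionError{desc: "write deadline", err: os.ErrDeadlineExceeded})
	fmt.Println(errors.Is(err, os.ErrDeadlineExceeded)) // true, via Unwrap
}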
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index 3604c7819..8e0f6abe8 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -188,7 +188,9 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
// map, and there's no guarantee that the MD attached to the context is
// created using our helper functions.
key := strings.ToLower(k)
- out[key] = v
+ s := make([]string, len(v))
+ copy(s, v)
+ out[key] = s
}
return out, true
}
@@ -226,7 +228,9 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) {
// map, and there's no guarantee that the MD attached to the context is
// created using our helper functions.
key := strings.ToLower(k)
- out[key] = v
+ s := make([]string, len(v))
+ copy(s, v)
+ out[key] = s
}
for _, added := range raw.added {
if len(added)%2 == 1 {
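
FromIncomingContext and FromOutgoingContext now copy each value slice, so callers may edit the returned MD without corrupting the metadata still attached to the context:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	ctx := metadata.NewIncomingContext(context.Background(),
		metadata.Pairs("authorization", "Bearer secret"))

	md, _ := metadata.FromIncomingContext(ctx)
	md["authorization"][0] = "[redacted]" // mutates the returned copy only

	orig, _ := metadata.FromIncomingContext(ctx)
	fmt.Println(orig["authorization"][0]) // still "Bearer secret" after this fix
}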
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index e8367cb89..843633c91 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -131,7 +131,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
}
if _, ok := status.FromError(err); ok {
// Status error: end the RPC unconditionally with this status.
- return nil, nil, err
+ return nil, nil, dropError{error: err}
}
// For all other errors, wait for ready RPCs should block and other
// RPCs should fail with unavailable.
@@ -175,3 +175,9 @@ func (pw *pickerWrapper) close() {
pw.done = true
close(pw.blockingCh)
}
+
+// dropError is a wrapper error that indicates the LB policy wishes to drop the
+// RPC and not retry it.
+type dropError struct {
+ error
+}
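
The wrapper is deliberately minimal: embedding keeps the original Error() text while the concrete type tells the retry path not to try again. A mirror of the pattern:

package main

import (
	"errors"
	"fmt"
)

// dropError mirrors the new wrapper: embedding keeps the original Error()
// text while the concrete type marks the RPC as not retryable.
type dropError struct{ error }

func main() {
	err := error(dropError{errors.New("deadline exceeded in picker")})
	if de, ok := err.(dropError); ok {
		fmt.Println("drop the RPC, do not retry:", de.error)
	}
}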
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index 5168b62b0..fb7a99e0a 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -44,79 +44,107 @@ func (*pickfirstBuilder) Name() string {
}
type pickfirstBalancer struct {
- state connectivity.State
- cc balancer.ClientConn
- sc balancer.SubConn
+ state connectivity.State
+ cc balancer.ClientConn
+ subConn balancer.SubConn
}
func (b *pickfirstBalancer) ResolverError(err error) {
- switch b.state {
- case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
- // Set a failing picker if we don't have a good picker.
- b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
- })
- }
if logger.V(2) {
logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
}
+ if b.subConn == nil {
+ b.state = connectivity.TransientFailure
+ }
+
+ if b.state != connectivity.TransientFailure {
+ // The picker will not change since the balancer does not currently
+ // report an error.
+ return
+ }
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
+ })
}
-func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error {
- if len(cs.ResolverState.Addresses) == 0 {
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+ if len(state.ResolverState.Addresses) == 0 {
+ // The resolver reported an empty address list. Treat it like an error by
+ // calling b.ResolverError.
+ if b.subConn != nil {
+ // Remove the old subConn. All addresses were removed, so it is no longer
+ // valid.
+ b.cc.RemoveSubConn(b.subConn)
+ b.subConn = nil
+ }
b.ResolverError(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState
}
- if b.sc == nil {
- var err error
- b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{})
- if err != nil {
- if logger.V(2) {
- logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
- }
- b.state = connectivity.TransientFailure
- b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
- })
- return balancer.ErrBadResolverState
+
+ if b.subConn != nil {
+ b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses)
+ return nil
+ }
+
+ subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{})
+ if err != nil {
+ if logger.V(2) {
+ logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
}
- b.state = connectivity.Idle
- b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}})
- b.sc.Connect()
- } else {
- b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses)
- b.sc.Connect()
+ b.state = connectivity.TransientFailure
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
+ })
+ return balancer.ErrBadResolverState
}
+ b.subConn = subConn
+ b.state = connectivity.Idle
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.Idle,
+ Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}},
+ })
+ b.subConn.Connect()
return nil
}
-func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
if logger.V(2) {
- logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s)
+ logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state)
}
- if b.sc != sc {
+ if b.subConn != subConn {
if logger.V(2) {
- logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
+ logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized")
}
return
}
- b.state = s.ConnectivityState
- if s.ConnectivityState == connectivity.Shutdown {
- b.sc = nil
+ b.state = state.ConnectivityState
+ if state.ConnectivityState == connectivity.Shutdown {
+ b.subConn = nil
return
}
- switch s.ConnectivityState {
+ switch state.ConnectivityState {
case connectivity.Ready:
- b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}})
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: state.ConnectivityState,
+ Picker: &picker{result: balancer.PickResult{SubConn: subConn}},
+ })
case connectivity.Connecting:
- b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: state.ConnectivityState,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
case connectivity.Idle:
- b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}})
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: state.ConnectivityState,
+ Picker: &idlePicker{subConn: subConn},
+ })
case connectivity.TransientFailure:
b.cc.UpdateState(balancer.State{
- ConnectivityState: s.ConnectivityState,
- Picker: &picker{err: s.ConnectionError},
+ ConnectivityState: state.ConnectivityState,
+ Picker: &picker{err: state.ConnectionError},
})
}
}
@@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() {
}
func (b *pickfirstBalancer) ExitIdle() {
- if b.sc != nil && b.state == connectivity.Idle {
- b.sc.Connect()
+ if b.subConn != nil && b.state == connectivity.Idle {
+ b.subConn.Connect()
}
}
@@ -135,18 +163,18 @@ type picker struct {
err error
}
-func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
return p.result, p.err
}
// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
// CONNECTING when Pick is called.
type idlePicker struct {
- sc balancer.SubConn
+ subConn balancer.SubConn
}
-func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
- i.sc.Connect()
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ i.subConn.Connect()
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
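
Of the renames above, idlePicker is the easiest to read in isolation: its first Pick nudges the idle SubConn into CONNECTING and reports that no connection is usable yet, so wait-for-ready RPCs block rather than fail. The same pattern, written against the public balancer package:

package pickfirstsketch

import "google.golang.org/grpc/balancer"

// idlePicker mirrors the renamed type above: the first Pick kicks the idle
// SubConn into CONNECTING and returns ErrNoSubConnAvailable so gRPC queues
// wait-for-ready RPCs instead of failing them.
type idlePicker struct{ subConn balancer.SubConn }

func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	i.subConn.Connect()
	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}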
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
index 58c802f8a..978b89f37 100644
--- a/vendor/google.golang.org/grpc/regenerate.sh
+++ b/vendor/google.golang.org/grpc/regenerate.sh
@@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH}
mkdir -p ${GOBIN}
echo "remove existing generated files"
-# grpc_testingv3/testv3.pb.go is not re-generated because it was
-# intentionally generated by an older version of protoc-gen-go.
-rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go')
+# grpc_testing_not_regenerate/*.pb.go is not re-generated,
+# see grpc_testing_not_regenerate/README.md for details.
+rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate')
echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
@@ -117,9 +117,9 @@ done
mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
-# grpc_testingv3/testv3.pb.go is not re-generated because it was
-# intentionally generated by an older version of protoc-gen-go.
-rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go
+# grpc_testing_not_regenerate/*.pb.go are not re-generated,
+# see grpc_testing_not_regenerate/README.md for details.
+rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
# grpc/service_config/service_config.proto does not have a go_package option.
mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index e28b68026..ca2e35a35 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -27,6 +27,7 @@ import (
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/serviceconfig"
)
@@ -139,13 +140,18 @@ type Address struct {
// Equal returns whether a and o are identical. Metadata is compared directly,
// not with any recursive introspection.
-func (a *Address) Equal(o Address) bool {
+func (a Address) Equal(o Address) bool {
return a.Addr == o.Addr && a.ServerName == o.ServerName &&
a.Attributes.Equal(o.Attributes) &&
a.BalancerAttributes.Equal(o.BalancerAttributes) &&
a.Type == o.Type && a.Metadata == o.Metadata
}
+// String returns JSON formatted string representation of the address.
+func (a Address) String() string {
+ return pretty.ToJSON(a)
+}
+
// BuildOptions includes additional information for the builder to create
// the resolver.
type BuildOptions struct {
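
Address now implements fmt.Stringer (JSON via the internal pretty helper), and Equal moves to a value receiver so plain Address values compare without taking a pointer. A usage sketch:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	a := resolver.Address{Addr: "10.0.0.7:443", ServerName: "backend.internal"}
	b := resolver.Address{Addr: "10.0.0.7:443", ServerName: "backend.internal"}
	fmt.Println(a.Equal(b)) // true; value receiver works on plain values now
	fmt.Println(a)          // pretty-printed JSON instead of a Go-syntax dump
}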
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
index 2c47cd54f..05a9d4e0b 100644
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -19,7 +19,6 @@
package grpc
import (
- "fmt"
"strings"
"sync"
@@ -27,6 +26,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
@@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
if ccr.done.HasFired() {
return nil
}
- channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s)
- if channelz.IsOn() {
- ccr.addChannelzTraceEvent(s)
- }
+ ccr.addChannelzTraceEvent(s)
ccr.curState = s
if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
return balancer.ErrBadResolverState
@@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
if ccr.done.HasFired() {
return
}
- channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs)
- if channelz.IsOn() {
- ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
- }
+ ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
ccr.curState.Addresses = addrs
ccr.cc.updateResolverState(ccr.curState, nil)
}
@@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
if ccr.done.HasFired() {
return
}
- channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc)
+ channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc)
if ccr.cc.dopts.disableServiceConfig {
channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
return
@@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
return
}
- if channelz.IsOn() {
- ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
- }
+ ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
ccr.curState.ServiceConfig = scpr
ccr.cc.updateResolverState(ccr.curState, nil)
}
@@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
updates = append(updates, "resolver returned new addresses")
}
- channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
- Severity: channelz.CtInfo,
- })
+ channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
}
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index eadf9e05f..65de84b30 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -134,7 +134,7 @@ type Server struct {
channelzRemoveOnce sync.Once
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
- channelzID int64 // channelz unique identification number
+ channelzID *channelz.Identifier
czData *channelzData
serverWorkerChannels []chan *serverWorkerData
@@ -584,9 +584,8 @@ func NewServer(opt ...ServerOption) *Server {
s.initServerWorkers()
}
- if channelz.IsOn() {
- s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
- }
+ s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
+ channelz.Info(logger, s.channelzID, "Server created")
return s
}
@@ -712,7 +711,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped")
type listenSocket struct {
net.Listener
- channelzID int64
+ channelzID *channelz.Identifier
}
func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
@@ -724,9 +723,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
func (l *listenSocket) Close() error {
err := l.Listener.Close()
- if channelz.IsOn() {
- channelz.RemoveEntry(l.channelzID)
- }
+ channelz.RemoveEntry(l.channelzID)
+ channelz.Info(logger, l.channelzID, "ListenSocket deleted")
return err
}
@@ -759,11 +757,6 @@ func (s *Server) Serve(lis net.Listener) error {
ls := &listenSocket{Listener: lis}
s.lis[ls] = true
- if channelz.IsOn() {
- ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
- }
- s.mu.Unlock()
-
defer func() {
s.mu.Lock()
if s.lis != nil && s.lis[ls] {
@@ -773,8 +766,16 @@ func (s *Server) Serve(lis net.Listener) error {
s.mu.Unlock()
}()
- var tempDelay time.Duration // how long to sleep on accept failure
+ var err error
+ ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
+ if err != nil {
+ s.mu.Unlock()
+ return err
+ }
+ s.mu.Unlock()
+ channelz.Info(logger, ls.channelzID, "ListenSocket created")
+ var tempDelay time.Duration // how long to sleep on accept failure
for {
rawConn, err := lis.Accept()
if err != nil {
@@ -1283,9 +1284,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
- // Convert appErr if it is not a grpc status error.
- appErr = status.Error(codes.Unknown, appErr.Error())
- appStatus, _ = status.FromError(appErr)
+ // Convert non-status application error to a status error with code
+ // Unknown, but handle context errors specifically.
+ appStatus = status.FromContextError(appErr)
+ appErr = appStatus.Err()
}
if trInfo != nil {
trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
@@ -1549,7 +1551,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
- appStatus = status.New(codes.Unknown, appErr.Error())
+ // Convert non-status application error to a status error with code
+ // Unknown, but handle context errors specifically.
+ appStatus = status.FromContextError(appErr)
appErr = appStatus.Err()
}
if trInfo != nil {
@@ -1706,11 +1710,7 @@ func (s *Server) Stop() {
s.done.Fire()
}()
- s.channelzRemoveOnce.Do(func() {
- if channelz.IsOn() {
- channelz.RemoveEntry(s.channelzID)
- }
- })
+ s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
s.mu.Lock()
listeners := s.lis
@@ -1748,11 +1748,7 @@ func (s *Server) GracefulStop() {
s.quit.Fire()
defer s.done.Fire()
- s.channelzRemoveOnce.Do(func() {
- if channelz.IsOn() {
- channelz.RemoveEntry(s.channelzID)
- }
- })
+ s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
s.mu.Lock()
if s.conns == nil {
s.mu.Unlock()
@@ -1805,12 +1801,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
return codec
}
-// SetHeader sets the header metadata.
-// When called multiple times, all the provided metadata will be merged.
-// All the metadata will be sent out when one of the following happens:
-// - grpc.SendHeader() is called;
-// - The first response is sent out;
-// - An RPC status is sent out (error or success).
+// SetHeader sets the header metadata to be sent from the server to the client.
+// The context provided must be the context passed to the server's handler.
+//
+// Streaming RPCs should prefer the SetHeader method of the ServerStream.
+//
+// When called multiple times, all the provided metadata will be merged. All
+// the metadata will be sent out when one of the following happens:
+//
+// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader.
+// - The first response message is sent. For unary handlers, this occurs when
+// the handler returns; for streaming handlers, this can happen when the
+// stream's SendMsg method is called.
+// - An RPC status is sent out (error or success). This occurs when the handler
+// returns.
+//
+// SetHeader will fail if called after any of the events above.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
func SetHeader(ctx context.Context, md metadata.MD) error {
if md.Len() == 0 {
return nil
@@ -1822,8 +1832,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
return stream.SetHeader(md)
}
-// SendHeader sends header metadata. It may be called at most once.
-// The provided md and headers set by SetHeader() will be sent.
+// SendHeader sends header metadata. It may be called at most once, and may not
+// be called after any event that causes headers to be sent (see SetHeader for
+// a complete list). The provided md and headers set by SetHeader() will be
+// sent.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
func SendHeader(ctx context.Context, md metadata.MD) error {
stream := ServerTransportStreamFromContext(ctx)
if stream == nil {
@@ -1837,6 +1853,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
// When called more than once, all the provided metadata will be merged.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
func SetTrailer(ctx context.Context, md metadata.MD) error {
if md.Len() == 0 {
return nil
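
A unary-handler sketch of the documented calls; the pb types and server struct are assumed generated stubs, not part of this change:

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"

	pb "example.com/echo" // assumed generated stubs
)

type server struct{ pb.UnimplementedEchoServer }

func (s *server) Echo(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) {
	// Must run before the first response message; afterwards SetHeader fails
	// with a status-compatible error (see the new doc above).
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-stage", "start")); err != nil {
		return nil, err
	}
	// Trailer metadata is buffered and flushed with the RPC status.
	if err := grpc.SetTrailer(ctx, metadata.Pairs("x-done", "true")); err != nil {
		return nil, err
	}
	return &pb.EchoResponse{Message: in.GetMessage()}, nil
}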
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 22c4240cf..b01c548bb 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -218,7 +218,7 @@ type jsonSC struct {
}
func init() {
- internal.ParseServiceConfigForTesting = parseServiceConfig
+ internal.ParseServiceConfig = parseServiceConfig
}
func parseServiceConfig(js string) *serviceconfig.ParseResult {
if len(js) == 0 {
@@ -381,6 +381,9 @@ func init() {
//
// If any of them is NOT *ServiceConfig, return false.
func equalServiceConfig(a, b serviceconfig.Config) bool {
+ if a == nil && b == nil {
+ return true
+ }
aa, ok := a.(*ServiceConfig)
if !ok {
return false
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 625d47b34..236fc17ec 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -36,6 +36,7 @@ import (
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcutil"
+ imetadata "google.golang.org/grpc/internal/metadata"
iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/serviceconfig"
"google.golang.org/grpc/internal/transport"
@@ -46,10 +47,12 @@ import (
)
// StreamHandler defines the handler called by gRPC server to complete the
-// execution of a streaming RPC. If a StreamHandler returns an error, it
-// should be produced by the status package, or else gRPC will use
-// codes.Unknown as the status code and err.Error() as the status message
-// of the RPC.
+// execution of a streaming RPC.
+//
+// If a StreamHandler returns an error, it should either be produced by the
+// status package, or be one of the context errors. Otherwise, gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message of the
+// RPC.
type StreamHandler func(srv interface{}, stream ServerStream) error
// StreamDesc represents a streaming RPC service's method specification. Used
@@ -164,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+ if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+ if err := imetadata.Validate(md); err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ }
if channelz.IsOn() {
cc.incrCallsStarted()
defer func() {
@@ -295,14 +303,28 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
}
cs.binlog = binarylog.GetMethodLogger(method)
- if err := cs.newAttemptLocked(false /* isTransparent */); err != nil {
+ cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */)
+ if err != nil {
cs.finish(err)
return nil, err
}
- op := func(a *csAttempt) error { return a.newStream() }
+ // Pick the transport to use and create a new stream on the transport.
+ // Assign cs.attempt upon success.
+ op := func(a *csAttempt) error {
+ if err := a.getTransport(); err != nil {
+ return err
+ }
+ if err := a.newStream(); err != nil {
+ return err
+ }
+ // Because this operation is always called either here (while creating
+ // the clientStream) or by the retry code while locked when replaying
+ // the operation, it is safe to access cs.attempt directly.
+ cs.attempt = a
+ return nil
+ }
if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
- cs.finish(err)
return nil, err
}
@@ -341,9 +363,15 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
return cs, nil
}
-// newAttemptLocked creates a new attempt with a transport.
-// If it succeeds, then it replaces clientStream's attempt with this new attempt.
-func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
+// newAttemptLocked creates a new csAttempt without a transport or stream.
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) {
+ if err := cs.ctx.Err(); err != nil {
+ return nil, toRPCErr(err)
+ }
+ if err := cs.cc.ctx.Err(); err != nil {
+ return nil, ErrClientConnClosing
+ }
+
ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
method := cs.callHdr.Method
sh := cs.cc.dopts.copts.StatsHandler
@@ -377,44 +405,39 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
ctx = trace.NewContext(ctx, trInfo.tr)
}
- newAttempt := &csAttempt{
+ if cs.cc.parsedTarget.Scheme == "xds" {
+ // Add extra metadata (metadata that will be added by transport) to context
+ // so the balancer can see them.
+ ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
+ "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
+ ))
+ }
+
+ return &csAttempt{
ctx: ctx,
beginTime: beginTime,
cs: cs,
dc: cs.cc.dopts.dc,
statsHandler: sh,
trInfo: trInfo,
- }
- defer func() {
- if retErr != nil {
- // This attempt is not set in the clientStream, so it's finish won't
- // be called. Call it here for stats and trace in case they are not
- // nil.
- newAttempt.finish(retErr)
- }
- }()
+ }, nil
+}
- if err := ctx.Err(); err != nil {
- return toRPCErr(err)
- }
+func (a *csAttempt) getTransport() error {
+ cs := a.cs
- if cs.cc.parsedTarget.Scheme == "xds" {
- // Add extra metadata (metadata that will be added by transport) to context
- // so the balancer can see them.
- ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
- "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
- ))
- }
- t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
+ var err error
+ a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
if err != nil {
+ if de, ok := err.(dropError); ok {
+ err = de.error
+ a.drop = true
+ }
return err
}
- if trInfo != nil {
- trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
+ if a.trInfo != nil {
+ a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
}
- newAttempt.t = t
- newAttempt.done = done
- cs.attempt = newAttempt
return nil
}
@@ -423,12 +446,21 @@ func (a *csAttempt) newStream() error {
cs.callHdr.PreviousAttempts = cs.numRetries
s, err := a.t.NewStream(a.ctx, cs.callHdr)
if err != nil {
- // Return without converting to an RPC error so retry code can
- // inspect.
- return err
+ nse, ok := err.(*transport.NewStreamError)
+ if !ok {
+ // Unexpected.
+ return err
+ }
+
+ if nse.AllowTransparentRetry {
+ a.allowTransparentRetry = true
+ }
+
+ // Unwrap and convert error.
+ return toRPCErr(nse.Err)
}
- cs.attempt.s = s
- cs.attempt.p = &parser{r: s}
+ a.s = s
+ a.p = &parser{r: s}
return nil
}
@@ -454,7 +486,7 @@ type clientStream struct {
retryThrottler *retryThrottler // The throttler active when the RPC began.
- binlog *binarylog.MethodLogger // Binary logger, can be nil.
+ binlog binarylog.MethodLogger // Binary logger, can be nil.
// serverHeaderBinlogged is a boolean for whether server header has been
// logged. Server header will be logged when the first time one of those
// happens: stream.Header(), stream.Recv().
@@ -506,6 +538,11 @@ type csAttempt struct {
statsHandler stats.Handler
beginTime time.Time
+
+ // set for newStream errors that may be transparently retried
+ allowTransparentRetry bool
+ // set for pick errors that are returned as a status
+ drop bool
}
func (cs *clientStream) commitAttemptLocked() {
@@ -525,41 +562,21 @@ func (cs *clientStream) commitAttempt() {
// shouldRetry returns nil if the RPC should be retried; otherwise it returns
// the error that should be returned by the operation. If the RPC should be
// retried, the bool indicates whether it is being retried transparently.
-func (cs *clientStream) shouldRetry(err error) (bool, error) {
- if cs.attempt.s == nil {
- // Error from NewClientStream.
- nse, ok := err.(*transport.NewStreamError)
- if !ok {
- // Unexpected, but assume no I/O was performed and the RPC is not
- // fatal, so retry indefinitely.
- return true, nil
- }
-
- // Unwrap and convert error.
- err = toRPCErr(nse.Err)
-
- // Never retry DoNotRetry errors, which indicate the RPC should not be
- // retried due to max header list size violation, etc.
- if nse.DoNotRetry {
- return false, err
- }
+func (a *csAttempt) shouldRetry(err error) (bool, error) {
+ cs := a.cs
- // In the event of a non-IO operation error from NewStream, we never
- // attempted to write anything to the wire, so we can retry
- // indefinitely.
- if !nse.DoNotTransparentRetry {
- return true, nil
- }
- }
- if cs.finished || cs.committed {
- // RPC is finished or committed; cannot retry.
+ if cs.finished || cs.committed || a.drop {
+ // RPC is finished or committed or was dropped by the picker; cannot retry.
return false, err
}
+ if a.s == nil && a.allowTransparentRetry {
+ return true, nil
+ }
// Wait for the trailers.
unprocessed := false
- if cs.attempt.s != nil {
- <-cs.attempt.s.Done()
- unprocessed = cs.attempt.s.Unprocessed()
+ if a.s != nil {
+ <-a.s.Done()
+ unprocessed = a.s.Unprocessed()
}
if cs.firstAttempt && unprocessed {
// First attempt, stream unprocessed: transparently retry.
@@ -571,14 +588,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) {
pushback := 0
hasPushback := false
- if cs.attempt.s != nil {
- if !cs.attempt.s.TrailersOnly() {
+ if a.s != nil {
+ if !a.s.TrailersOnly() {
return false, err
}
// TODO(retry): Move down if the spec changes to not check server pushback
// before considering this a failure for throttling.
- sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
+ sps := a.s.Trailer()["grpc-retry-pushback-ms"]
if len(sps) == 1 {
var e error
if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
@@ -595,10 +612,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) {
}
var code codes.Code
- if cs.attempt.s != nil {
- code = cs.attempt.s.Status().Code()
+ if a.s != nil {
+ code = a.s.Status().Code()
} else {
- code = status.Convert(err).Code()
+ code = status.Code(err)
}
rp := cs.methodConfig.RetryPolicy
@@ -643,19 +660,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) {
}
// Returns nil if a retry was performed and succeeded; error otherwise.
-func (cs *clientStream) retryLocked(lastErr error) error {
+func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
for {
- cs.attempt.finish(toRPCErr(lastErr))
- isTransparent, err := cs.shouldRetry(lastErr)
+ attempt.finish(toRPCErr(lastErr))
+ isTransparent, err := attempt.shouldRetry(lastErr)
if err != nil {
cs.commitAttemptLocked()
return err
}
cs.firstAttempt = false
- if err := cs.newAttemptLocked(isTransparent); err != nil {
+ attempt, err = cs.newAttemptLocked(isTransparent)
+ if err != nil {
+ // This only returns an error if the clientconn is closed or the
+ // context of the stream is canceled.
return err
}
- if lastErr = cs.replayBufferLocked(); lastErr == nil {
+ // Note that the first op in the replay buffer always sets cs.attempt
+ // if it is able to pick a transport and create a stream.
+ if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
return nil
}
}
@@ -665,7 +687,10 @@ func (cs *clientStream) Context() context.Context {
cs.commitAttempt()
// No need to lock before using attempt, since we know it is committed and
// cannot change.
- return cs.attempt.s.Context()
+ if cs.attempt.s != nil {
+ return cs.attempt.s.Context()
+ }
+ return cs.ctx
}
func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
@@ -695,7 +720,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
cs.mu.Unlock()
return err
}
- if err := cs.retryLocked(err); err != nil {
+ if err := cs.retryLocked(a, err); err != nil {
cs.mu.Unlock()
return err
}
@@ -726,7 +751,7 @@ func (cs *clientStream) Header() (metadata.MD, error) {
cs.binlog.Log(logEntry)
cs.serverHeaderBinlogged = true
}
- return m, err
+ return m, nil
}
func (cs *clientStream) Trailer() metadata.MD {
@@ -744,10 +769,9 @@ func (cs *clientStream) Trailer() metadata.MD {
return cs.attempt.s.Trailer()
}
-func (cs *clientStream) replayBufferLocked() error {
- a := cs.attempt
+func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
for _, f := range cs.buffer {
- if err := f(a); err != nil {
+ if err := f(attempt); err != nil {
return err
}
}
@@ -795,22 +819,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
if len(payload) > *cs.callInfo.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
}
- msgBytes := data // Store the pointer before setting to nil. For binary logging.
op := func(a *csAttempt) error {
- err := a.sendMsg(m, hdr, payload, data)
- // nil out the message and uncomp when replaying; they are only needed for
- // stats which is disabled for subsequent attempts.
- m, data = nil, nil
- return err
+ return a.sendMsg(m, hdr, payload, data)
}
err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
if cs.binlog != nil && err == nil {
cs.binlog.Log(&binarylog.ClientMessage{
OnClientSide: true,
- Message: msgBytes,
+ Message: data,
})
}
- return
+ return err
}
func (cs *clientStream) RecvMsg(m interface{}) error {
@@ -1362,8 +1381,10 @@ func (as *addrConnStream) finish(err error) {
// ServerStream defines the server-side behavior of a streaming RPC.
//
-// All errors returned from ServerStream methods are compatible with the
-// status package.
+// Errors returned from ServerStream methods are compatible with the status
+// package. However, the status code will often not match the RPC status as
+// seen by the client application, and therefore, should not be relied upon for
+// this purpose.
type ServerStream interface {
// SetHeader sets the header metadata. It may be called multiple times.
// When called multiple times, all the provided metadata will be merged.
@@ -1426,7 +1447,7 @@ type serverStream struct {
statsHandler stats.Handler
- binlog *binarylog.MethodLogger
+ binlog binarylog.MethodLogger
// serverHeaderBinlogged indicates whether the server header has been logged.
// It will be set when one of the following happens: stream.SendHeader() or
// stream.Send().
@@ -1446,11 +1467,20 @@ func (ss *serverStream) SetHeader(md metadata.MD) error {
if md.Len() == 0 {
return nil
}
+ err := imetadata.Validate(md)
+ if err != nil {
+ return status.Error(codes.Internal, err.Error())
+ }
return ss.s.SetHeader(md)
}
func (ss *serverStream) SendHeader(md metadata.MD) error {
- err := ss.t.WriteHeader(ss.s, md)
+ err := imetadata.Validate(md)
+ if err != nil {
+ return status.Error(codes.Internal, err.Error())
+ }
+
+ err = ss.t.WriteHeader(ss.s, md)
if ss.binlog != nil && !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
ss.binlog.Log(&binarylog.ServerHeader{
@@ -1465,6 +1495,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
if md.Len() == 0 {
return
}
+ if err := imetadata.Validate(md); err != nil {
+ logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err)
+ }
ss.s.SetTrailer(md)
}
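
Two user-visible consequences of the stream.go changes: handler errors should be status or context errors (anything else maps to codes.Unknown), and metadata set via SetHeader/SendHeader/SetTrailer is now validated. A streaming-handler sketch (the pb service is an assumed stub):

package main

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"

	pb "example.com/echo" // assumed generated stubs
)

type watchServer struct{ pb.UnimplementedEchoServer }

func (s *watchServer) Watch(in *pb.WatchRequest, stream pb.Echo_WatchServer) error {
	// Header metadata is validated now; illegal keys fail with codes.Internal.
	if err := stream.SendHeader(metadata.Pairs("x-watch", "v1")); err != nil {
		return err
	}
	if in.GetTopic() == "" {
		// Status errors pass through to the client unchanged.
		return status.Error(codes.InvalidArgument, "topic is required")
	}
	// Context errors are also fine: they map to the matching status codes.
	<-stream.Context().Done()
	return stream.Context().Err()
}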
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 9d3fd73da..5bc03f9b3 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.44.1-dev"
+const Version = "1.47.0"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index d923187a7..ceb436c6c 100644
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -107,7 +107,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do
go vet -all ./... | fail_on_output
gofmt -s -d -l . 2>&1 | fail_on_output
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
- golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:"
+ golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"
go mod tidy
git status --porcelain 2>&1 | fail_on_output || \
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
new file mode 100644
index 000000000..07da5db34
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -0,0 +1,665 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protojson
+
+import (
+ "encoding/base64"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/internal/encoding/json"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/set"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Unmarshal reads the given []byte into the given proto.Message.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+func Unmarshal(b []byte, m proto.Message) error {
+ return UnmarshalOptions{}.Unmarshal(b, m)
+}
+
+// UnmarshalOptions is a configurable JSON format parser.
+type UnmarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // If AllowPartial is set, input that results in missing required fields
+ // will not return an error.
+ AllowPartial bool
+
+ // If DiscardUnknown is set, unknown fields are ignored.
+ DiscardUnknown bool
+
+ // Resolver is used for looking up types when unmarshaling
+ // google.protobuf.Any messages or extension fields.
+ // If nil, this defaults to using protoregistry.GlobalTypes.
+ Resolver interface {
+ protoregistry.MessageTypeResolver
+ protoregistry.ExtensionTypeResolver
+ }
+}
+
+// Unmarshal reads the given []byte and populates the given proto.Message
+// using options in the UnmarshalOptions object.
+// It will clear the message first before setting the fields.
+// If it returns an error, the given message may be partially set.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
+ return o.unmarshal(b, m)
+}
+
+// unmarshal is a centralized function that all unmarshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for unmarshal that do not go through this.
+func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
+ proto.Reset(m)
+
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+
+ dec := decoder{json.NewDecoder(b), o}
+ if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
+ return err
+ }
+
+ // Check for EOF.
+ tok, err := dec.Read()
+ if err != nil {
+ return err
+ }
+ if tok.Kind() != json.EOF {
+ return dec.unexpectedTokenError(tok)
+ }
+
+ if o.AllowPartial {
+ return nil
+ }
+ return proto.CheckInitialized(m)
+}
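
A usage sketch for the options above; durationpb stands in for any generated message:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var d durationpb.Duration
	opts := protojson.UnmarshalOptions{DiscardUnknown: true}
	if err := opts.Unmarshal([]byte(`"3.5s"`), &d); err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.AsDuration()) // 3.5s
}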
+
+type decoder struct {
+ *json.Decoder
+ opts UnmarshalOptions
+}
+
+// newError returns an error object with position info.
+func (d decoder) newError(pos int, f string, x ...interface{}) error {
+ line, column := d.Position(pos)
+ head := fmt.Sprintf("(line %d:%d): ", line, column)
+ return errors.New(head+f, x...)
+}
+
+// unexpectedTokenError returns a syntax error for the given unexpected token.
+func (d decoder) unexpectedTokenError(tok json.Token) error {
+ return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString())
+}
+
+// syntaxError returns a syntax error for given position.
+func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
+ line, column := d.Position(pos)
+ head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
+ return errors.New(head+f, x...)
+}
+
+// unmarshalMessage unmarshals a message into the given protoreflect.Message.
+func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error {
+ if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
+ return unmarshal(d, m)
+ }
+
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ if tok.Kind() != json.ObjectOpen {
+ return d.unexpectedTokenError(tok)
+ }
+
+ messageDesc := m.Descriptor()
+ if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
+ return errors.New("no support for proto1 MessageSets")
+ }
+
+ var seenNums set.Ints
+ var seenOneofs set.Ints
+ fieldDescs := messageDesc.Fields()
+ for {
+ // Read field name.
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ default:
+ return d.unexpectedTokenError(tok)
+ case json.ObjectClose:
+ return nil
+ case json.Name:
+ // Continue below.
+ }
+
+ name := tok.Name()
+ // The JSON for a non-custom embedded message in Any contains the
+ // field "@type", which should be skipped because it is not a field
+ // of the embedded message, but simply an artifact of the Any format.
+ if skipTypeURL && name == "@type" {
+ d.Read()
+ continue
+ }
+
+ // Get the FieldDescriptor.
+ var fd pref.FieldDescriptor
+ if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") {
+ // Only extension names are in [name] format.
+ extName := pref.FullName(name[1 : len(name)-1])
+ extType, err := d.opts.Resolver.FindExtensionByName(extName)
+ if err != nil && err != protoregistry.NotFound {
+ return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err)
+ }
+ if extType != nil {
+ fd = extType.TypeDescriptor()
+ if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() {
+ return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName())
+ }
+ }
+ } else {
+ // The name can either be the JSON name or the proto field name.
+ fd = fieldDescs.ByJSONName(name)
+ if fd == nil {
+ fd = fieldDescs.ByTextName(name)
+ }
+ }
+ if flags.ProtoLegacy {
+ if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
+ fd = nil // reset since the weak reference is not linked in
+ }
+ }
+
+ if fd == nil {
+ // Field is unknown.
+ if d.opts.DiscardUnknown {
+ if err := d.skipJSONValue(); err != nil {
+ return err
+ }
+ continue
+ }
+ return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
+ }
+
+ // Do not allow duplicate fields.
+ num := uint64(fd.Number())
+ if seenNums.Has(num) {
+ return d.newError(tok.Pos(), "duplicate field %v", tok.RawString())
+ }
+ seenNums.Set(num)
+
+ // No need to set values for JSON null unless the field type is
+ // google.protobuf.Value or google.protobuf.NullValue.
+ if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) {
+ d.Read()
+ continue
+ }
+
+ switch {
+ case fd.IsList():
+ list := m.Mutable(fd).List()
+ if err := d.unmarshalList(list, fd); err != nil {
+ return err
+ }
+ case fd.IsMap():
+ mmap := m.Mutable(fd).Map()
+ if err := d.unmarshalMap(mmap, fd); err != nil {
+ return err
+ }
+ default:
+ // If field is a oneof, check if it has already been set.
+ if od := fd.ContainingOneof(); od != nil {
+ idx := uint64(od.Index())
+ if seenOneofs.Has(idx) {
+ return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName())
+ }
+ seenOneofs.Set(idx)
+ }
+
+ // Required or optional fields.
+ if err := d.unmarshalSingular(m, fd); err != nil {
+ return err
+ }
+ }
+ }
+}
+
+func isKnownValue(fd pref.FieldDescriptor) bool {
+ md := fd.Message()
+ return md != nil && md.FullName() == genid.Value_message_fullname
+}
+
+func isNullValue(fd pref.FieldDescriptor) bool {
+ ed := fd.Enum()
+ return ed != nil && ed.FullName() == genid.NullValue_enum_fullname
+}
+
+// unmarshalSingular unmarshals to the non-repeated field specified
+// by the given FieldDescriptor.
+func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) error {
+ var val pref.Value
+ var err error
+ switch fd.Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ val = m.NewField(fd)
+ err = d.unmarshalMessage(val.Message(), false)
+ default:
+ val, err = d.unmarshalScalar(fd)
+ }
+
+ if err != nil {
+ return err
+ }
+ m.Set(fd, val)
+ return nil
+}
+
+// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by
+// the given FieldDescriptor.
+func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) {
+ const b32 int = 32
+ const b64 int = 64
+
+ tok, err := d.Read()
+ if err != nil {
+ return pref.Value{}, err
+ }
+
+ kind := fd.Kind()
+ switch kind {
+ case pref.BoolKind:
+ if tok.Kind() == json.Bool {
+ return pref.ValueOfBool(tok.Bool()), nil
+ }
+
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ if v, ok := unmarshalInt(tok, b32); ok {
+ return v, nil
+ }
+
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ if v, ok := unmarshalInt(tok, b64); ok {
+ return v, nil
+ }
+
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ if v, ok := unmarshalUint(tok, b32); ok {
+ return v, nil
+ }
+
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ if v, ok := unmarshalUint(tok, b64); ok {
+ return v, nil
+ }
+
+ case pref.FloatKind:
+ if v, ok := unmarshalFloat(tok, b32); ok {
+ return v, nil
+ }
+
+ case pref.DoubleKind:
+ if v, ok := unmarshalFloat(tok, b64); ok {
+ return v, nil
+ }
+
+ case pref.StringKind:
+ if tok.Kind() == json.String {
+ return pref.ValueOfString(tok.ParsedString()), nil
+ }
+
+ case pref.BytesKind:
+ if v, ok := unmarshalBytes(tok); ok {
+ return v, nil
+ }
+
+ case pref.EnumKind:
+ if v, ok := unmarshalEnum(tok, fd); ok {
+ return v, nil
+ }
+
+ default:
+ panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
+ }
+
+ return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+}
+
+func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) {
+ switch tok.Kind() {
+ case json.Number:
+ return getInt(tok, bitSize)
+
+ case json.String:
+ // Decode number from string.
+ s := strings.TrimSpace(tok.ParsedString())
+ if len(s) != len(tok.ParsedString()) {
+ return pref.Value{}, false
+ }
+ dec := json.NewDecoder([]byte(s))
+ tok, err := dec.Read()
+ if err != nil {
+ return pref.Value{}, false
+ }
+ return getInt(tok, bitSize)
+ }
+ return pref.Value{}, false
+}
+
+func getInt(tok json.Token, bitSize int) (pref.Value, bool) {
+ n, ok := tok.Int(bitSize)
+ if !ok {
+ return pref.Value{}, false
+ }
+ if bitSize == 32 {
+ return pref.ValueOfInt32(int32(n)), true
+ }
+ return pref.ValueOfInt64(n), true
+}
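+
+// A minimal sketch of the accepted inputs (tokFor is a hypothetical helper
+// producing a json.Token for a literal): unmarshalInt accepts a JSON number
+// as well as the same number wrapped in a JSON string, but rejects strings
+// with surrounding whitespace, since TrimSpace would change their length:
+//
+//    unmarshalInt(tokFor(`123`), 32)    // ok, yields ValueOfInt32(123)
+//    unmarshalInt(tokFor(`"123"`), 32)  // ok, the string form is re-decoded
+//    unmarshalInt(tokFor(`" 123"`), 32) // rejected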
+
+func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) {
+ switch tok.Kind() {
+ case json.Number:
+ return getUint(tok, bitSize)
+
+ case json.String:
+ // Decode number from string.
+ s := strings.TrimSpace(tok.ParsedString())
+ if len(s) != len(tok.ParsedString()) {
+ return pref.Value{}, false
+ }
+ dec := json.NewDecoder([]byte(s))
+ tok, err := dec.Read()
+ if err != nil {
+ return pref.Value{}, false
+ }
+ return getUint(tok, bitSize)
+ }
+ return pref.Value{}, false
+}
+
+func getUint(tok json.Token, bitSize int) (pref.Value, bool) {
+ n, ok := tok.Uint(bitSize)
+ if !ok {
+ return pref.Value{}, false
+ }
+ if bitSize == 32 {
+ return pref.ValueOfUint32(uint32(n)), true
+ }
+ return pref.ValueOfUint64(n), true
+}
+
+func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) {
+ switch tok.Kind() {
+ case json.Number:
+ return getFloat(tok, bitSize)
+
+ case json.String:
+ s := tok.ParsedString()
+ switch s {
+ case "NaN":
+ if bitSize == 32 {
+ return pref.ValueOfFloat32(float32(math.NaN())), true
+ }
+ return pref.ValueOfFloat64(math.NaN()), true
+ case "Infinity":
+ if bitSize == 32 {
+ return pref.ValueOfFloat32(float32(math.Inf(+1))), true
+ }
+ return pref.ValueOfFloat64(math.Inf(+1)), true
+ case "-Infinity":
+ if bitSize == 32 {
+ return pref.ValueOfFloat32(float32(math.Inf(-1))), true
+ }
+ return pref.ValueOfFloat64(math.Inf(-1)), true
+ }
+
+ // Decode number from string.
+ if len(s) != len(strings.TrimSpace(s)) {
+ return pref.Value{}, false
+ }
+ dec := json.NewDecoder([]byte(s))
+ tok, err := dec.Read()
+ if err != nil {
+ return pref.Value{}, false
+ }
+ return getFloat(tok, bitSize)
+ }
+ return pref.Value{}, false
+}
+
+func getFloat(tok json.Token, bitSize int) (pref.Value, bool) {
+ n, ok := tok.Float(bitSize)
+ if !ok {
+ return pref.Value{}, false
+ }
+ if bitSize == 32 {
+ return pref.ValueOfFloat32(float32(n)), true
+ }
+ return pref.ValueOfFloat64(n), true
+}
+
+func unmarshalBytes(tok json.Token) (pref.Value, bool) {
+ if tok.Kind() != json.String {
+ return pref.Value{}, false
+ }
+
+ s := tok.ParsedString()
+ enc := base64.StdEncoding
+ if strings.ContainsAny(s, "-_") {
+ enc = base64.URLEncoding
+ }
+ if len(s)%4 != 0 {
+ enc = enc.WithPadding(base64.NoPadding)
+ }
+ b, err := enc.DecodeString(s)
+ if err != nil {
+ return pref.Value{}, false
+ }
+ return pref.ValueOfBytes(b), true
+}
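+
+// A short sketch of the heuristic above (inputs assumed): "aGk=" decodes with
+// the standard alphabet and padding, "aGk" (length not a multiple of 4)
+// switches to no-padding mode, and any '-' or '_' switches to the URL
+// alphabet:
+//
+//    "aGk=" // std, padded    -> []byte("hi")
+//    "aGk"  // std, unpadded  -> []byte("hi")
+//    "_w"   // URL, unpadded  -> []byte{0xff}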
+
+func unmarshalEnum(tok json.Token, fd pref.FieldDescriptor) (pref.Value, bool) {
+ switch tok.Kind() {
+ case json.String:
+ // Lookup EnumNumber based on name.
+ s := tok.ParsedString()
+ if enumVal := fd.Enum().Values().ByName(pref.Name(s)); enumVal != nil {
+ return pref.ValueOfEnum(enumVal.Number()), true
+ }
+
+ case json.Number:
+ if n, ok := tok.Int(32); ok {
+ return pref.ValueOfEnum(pref.EnumNumber(n)), true
+ }
+
+ case json.Null:
+ // This is only valid for google.protobuf.NullValue.
+ if isNullValue(fd) {
+ return pref.ValueOfEnum(0), true
+ }
+ }
+
+ return pref.Value{}, false
+}
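+
+// Sketch of the three accepted JSON forms for an enum field (values are
+// illustrative): an enum value name as a string, a raw number, or null for
+// google.protobuf.NullValue only:
+//
+//    "FOO_BAR" // looked up via fd.Enum().Values().ByName
+//    2         // any int32 is accepted, even without a matching value name
+//    null      // valid only when the field type is NullValue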
+
+func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ if tok.Kind() != json.ArrayOpen {
+ return d.unexpectedTokenError(tok)
+ }
+
+ switch fd.Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ for {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+
+ if tok.Kind() == json.ArrayClose {
+ d.Read()
+ return nil
+ }
+
+ val := list.NewElement()
+ if err := d.unmarshalMessage(val.Message(), false); err != nil {
+ return err
+ }
+ list.Append(val)
+ }
+ default:
+ for {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+
+ if tok.Kind() == json.ArrayClose {
+ d.Read()
+ return nil
+ }
+
+ val, err := d.unmarshalScalar(fd)
+ if err != nil {
+ return err
+ }
+ list.Append(val)
+ }
+ }
+
+ return nil
+}
+
+func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ if tok.Kind() != json.ObjectOpen {
+ return d.unexpectedTokenError(tok)
+ }
+
+ // Determine ahead of time whether the map entry is a scalar type or a
+ // message type in order to call the appropriate unmarshalMapValue func
+ // inside the for loop below.
+ var unmarshalMapValue func() (pref.Value, error)
+ switch fd.MapValue().Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ unmarshalMapValue = func() (pref.Value, error) {
+ val := mmap.NewValue()
+ if err := d.unmarshalMessage(val.Message(), false); err != nil {
+ return pref.Value{}, err
+ }
+ return val, nil
+ }
+ default:
+ unmarshalMapValue = func() (pref.Value, error) {
+ return d.unmarshalScalar(fd.MapValue())
+ }
+ }
+
+Loop:
+ for {
+ // Read field name.
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ default:
+ return d.unexpectedTokenError(tok)
+ case json.ObjectClose:
+ break Loop
+ case json.Name:
+ // Continue.
+ }
+
+ // Unmarshal field name.
+ pkey, err := d.unmarshalMapKey(tok, fd.MapKey())
+ if err != nil {
+ return err
+ }
+
+ // Check for duplicate field name.
+ if mmap.Has(pkey) {
+ return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString())
+ }
+
+ // Read and unmarshal field value.
+ pval, err := unmarshalMapValue()
+ if err != nil {
+ return err
+ }
+
+ mmap.Set(pkey, pval)
+ }
+
+ return nil
+}
+
+// unmarshalMapKey converts the given token of Name kind into a
+// protoreflect.MapKey. A map key type is any integral, boolean, or string type.
+func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.MapKey, error) {
+ const b32 = 32
+ const b64 = 64
+ const base10 = 10
+
+ name := tok.Name()
+ kind := fd.Kind()
+ switch kind {
+ case pref.StringKind:
+ return pref.ValueOfString(name).MapKey(), nil
+
+ case pref.BoolKind:
+ switch name {
+ case "true":
+ return pref.ValueOfBool(true).MapKey(), nil
+ case "false":
+ return pref.ValueOfBool(false).MapKey(), nil
+ }
+
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ if n, err := strconv.ParseInt(name, base10, b32); err == nil {
+ return pref.ValueOfInt32(int32(n)).MapKey(), nil
+ }
+
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ if n, err := strconv.ParseInt(name, base10, b64); err == nil {
+ return pref.ValueOfInt64(int64(n)).MapKey(), nil
+ }
+
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ if n, err := strconv.ParseUint(name, base10, b32); err == nil {
+ return pref.ValueOfUint32(uint32(n)).MapKey(), nil
+ }
+
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ if n, err := strconv.ParseUint(name, base10, b64); err == nil {
+ return pref.ValueOfUint64(uint64(n)).MapKey(), nil
+ }
+
+ default:
+ panic(fmt.Sprintf("invalid kind for map key: %v", kind))
+ }
+
+ return pref.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString())
+}
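+
+// For example (JSON shown, map types assumed): for a map<int32, string> field
+// the object key "10" parses to an int32 map key, while for map<bool, string>
+// only the exact strings "true" and "false" are accepted:
+//
+//    {"10": "x"}   // int32 key -> ValueOfInt32(10).MapKey()
+//    {"true": "x"} // bool key  -> ValueOfBool(true).MapKey()
+//    {"TRUE": "x"} // rejected for a bool key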
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
new file mode 100644
index 000000000..00ea2fecf
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protojson marshals and unmarshals protocol buffer messages as JSON
+// format. It follows the guide at
+// https://developers.google.com/protocol-buffers/docs/proto3#json.
+//
+// This package produces a different output than the standard "encoding/json"
+// package, which does not operate correctly on protocol buffer messages.
+package protojson
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
new file mode 100644
index 000000000..ba971f078
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -0,0 +1,344 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protojson
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "google.golang.org/protobuf/internal/encoding/json"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/order"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const defaultIndent = " "
+
+// Format formats the message as a multiline string.
+// This function is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func Format(m proto.Message) string {
+ return MarshalOptions{Multiline: true}.Format(m)
+}
+
+// Marshal writes the given proto.Message in JSON format using default options.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func Marshal(m proto.Message) ([]byte, error) {
+ return MarshalOptions{}.Marshal(m)
+}
+
+// MarshalOptions is a configurable JSON format marshaler.
+type MarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // Multiline specifies whether the marshaler should format the output in
+ // indented-form with every textual element on a new line.
+ // If Indent is an empty string, then an arbitrary indent is chosen.
+ Multiline bool
+
+ // Indent specifies the set of indentation characters to use in a multiline
+ // formatted output such that every entry is preceded by Indent and
+ // terminated by a newline. If non-empty, then Multiline is treated as true.
+ // Indent can only be composed of space or tab characters.
+ Indent string
+
+ // AllowPartial allows messages that have missing required fields to marshal
+ // without returning an error. If AllowPartial is false (the default),
+ // Marshal will return an error if there are any missing required fields.
+ AllowPartial bool
+
+ // UseProtoNames uses proto field names instead of lowerCamelCase names in
+ // JSON field names.
+ UseProtoNames bool
+
+ // UseEnumNumbers emits enum values as numbers.
+ UseEnumNumbers bool
+
+ // EmitUnpopulated specifies whether to emit unpopulated fields. It does not
+ // emit unpopulated oneof fields or unpopulated extension fields.
+ // The JSON values emitted for unpopulated fields are as follows:
+ // ╔═══════╤════════════════════════════╗
+ // ║ JSON │ Protobuf field ║
+ // ╠═══════╪════════════════════════════╣
+ // ║ false │ proto3 boolean fields ║
+ // ║ 0 │ proto3 numeric fields ║
+ // ║ "" │ proto3 string/bytes fields ║
+ // ║ null │ proto2 scalar fields ║
+ // ║ null │ message fields ║
+ // ║ [] │ list fields ║
+ // ║ {} │ map fields ║
+ // ╚═══════╧════════════════════════════╝
+ EmitUnpopulated bool
+
+ // Resolver is used for looking up types when expanding google.protobuf.Any
+ // messages. If nil, this defaults to using protoregistry.GlobalTypes.
+ Resolver interface {
+ protoregistry.ExtensionTypeResolver
+ protoregistry.MessageTypeResolver
+ }
+}
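+
+// A typical call site, sketched with an assumed proto.Message value m; the
+// options used are the fields declared above, not new API:
+//
+//    b, err := protojson.MarshalOptions{
+//        Multiline:       true,
+//        EmitUnpopulated: true,
+//    }.Marshal(m)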
+
+// Format formats the message as a string.
+// This method is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func (o MarshalOptions) Format(m proto.Message) string {
+ if m == nil || !m.ProtoReflect().IsValid() {
+ return "<nil>" // invalid syntax, but okay since this is for debugging
+ }
+ o.AllowPartial = true
+ b, _ := o.Marshal(m)
+ return string(b)
+}
+
+// Marshal marshals the given proto.Message in the JSON format using options in
+// MarshalOptions. Do not depend on the output being stable. It may change over
+// time across different versions of the program.
+func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
+ return o.marshal(m)
+}
+
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
+func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
+ if o.Multiline && o.Indent == "" {
+ o.Indent = defaultIndent
+ }
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+
+ internalEnc, err := json.NewEncoder(o.Indent)
+ if err != nil {
+ return nil, err
+ }
+
+ // Treat nil message interface as an empty message,
+ // in which case the output is an empty JSON object.
+ if m == nil {
+ return []byte("{}"), nil
+ }
+
+ enc := encoder{internalEnc, o}
+ if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil {
+ return nil, err
+ }
+ if o.AllowPartial {
+ return enc.Bytes(), nil
+ }
+ return enc.Bytes(), proto.CheckInitialized(m)
+}
+
+type encoder struct {
+ *json.Encoder
+ opts MarshalOptions
+}
+
+// typeFieldDesc is a synthetic field descriptor used for the "@type" field.
+var typeFieldDesc = func() protoreflect.FieldDescriptor {
+ var fd filedesc.Field
+ fd.L0.FullName = "@type"
+ fd.L0.Index = -1
+ fd.L1.Cardinality = protoreflect.Optional
+ fd.L1.Kind = protoreflect.StringKind
+ return &fd
+}()
+
+// typeURLFieldRanger wraps an order.FieldRanger and modifies its Range method
+// to additionally iterate over a synthetic field for the type URL.
+type typeURLFieldRanger struct {
+ order.FieldRanger
+ typeURL string
+}
+
+func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+ if !f(typeFieldDesc, pref.ValueOfString(m.typeURL)) {
+ return
+ }
+ m.FieldRanger.Range(f)
+}
+
+// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
+// method to additionally iterate over unpopulated fields.
+type unpopulatedFieldRanger struct{ pref.Message }
+
+func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+ fds := m.Descriptor().Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ if m.Has(fd) || fd.ContainingOneof() != nil {
+ continue // ignore populated fields and fields within a oneof
+ }
+
+ v := m.Get(fd)
+ isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid()
+ isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil
+ if isProto2Scalar || isSingularMessage {
+ v = pref.Value{} // use invalid value to emit null
+ }
+ if !f(fd, v) {
+ return
+ }
+ }
+ m.Message.Range(f)
+}
+
+// marshalMessage marshals the fields in the given protoreflect.Message.
+// If the typeURL is non-empty, then a synthetic "@type" field is injected
+// containing the URL as the value.
+func (e encoder) marshalMessage(m pref.Message, typeURL string) error {
+ if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) {
+ return errors.New("no support for proto1 MessageSets")
+ }
+
+ if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil {
+ return marshal(e, m)
+ }
+
+ e.StartObject()
+ defer e.EndObject()
+
+ var fields order.FieldRanger = m
+ if e.opts.EmitUnpopulated {
+ fields = unpopulatedFieldRanger{m}
+ }
+ if typeURL != "" {
+ fields = typeURLFieldRanger{fields, typeURL}
+ }
+
+ var err error
+ order.RangeFields(fields, order.IndexNameFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool {
+ name := fd.JSONName()
+ if e.opts.UseProtoNames {
+ name = fd.TextName()
+ }
+
+ if err = e.WriteName(name); err != nil {
+ return false
+ }
+ if err = e.marshalValue(v, fd); err != nil {
+ return false
+ }
+ return true
+ })
+ return err
+}
+
+// marshalValue marshals the given protoreflect.Value.
+func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error {
+ switch {
+ case fd.IsList():
+ return e.marshalList(val.List(), fd)
+ case fd.IsMap():
+ return e.marshalMap(val.Map(), fd)
+ default:
+ return e.marshalSingular(val, fd)
+ }
+}
+
+// marshalSingular marshals the given non-repeated field value. This includes
+// all scalar types, enums, messages, and groups.
+func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error {
+ if !val.IsValid() {
+ e.WriteNull()
+ return nil
+ }
+
+ switch kind := fd.Kind(); kind {
+ case pref.BoolKind:
+ e.WriteBool(val.Bool())
+
+ case pref.StringKind:
+ if e.WriteString(val.String()) != nil {
+ return errors.InvalidUTF8(string(fd.FullName()))
+ }
+
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ e.WriteInt(val.Int())
+
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ e.WriteUint(val.Uint())
+
+ case pref.Int64Kind, pref.Sint64Kind, pref.Uint64Kind,
+ pref.Sfixed64Kind, pref.Fixed64Kind:
+ // 64-bit integers are written out as JSON string.
+ e.WriteString(val.String())
+
+ case pref.FloatKind:
+ // Encoder.WriteFloat handles the special numbers NaN and infinities.
+ e.WriteFloat(val.Float(), 32)
+
+ case pref.DoubleKind:
+ // Encoder.WriteFloat handles the special numbers NaN and infinities.
+ e.WriteFloat(val.Float(), 64)
+
+ case pref.BytesKind:
+ e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes()))
+
+ case pref.EnumKind:
+ if fd.Enum().FullName() == genid.NullValue_enum_fullname {
+ e.WriteNull()
+ } else {
+ desc := fd.Enum().Values().ByNumber(val.Enum())
+ if e.opts.UseEnumNumbers || desc == nil {
+ e.WriteInt(int64(val.Enum()))
+ } else {
+ e.WriteString(string(desc.Name()))
+ }
+ }
+
+ case pref.MessageKind, pref.GroupKind:
+ if err := e.marshalMessage(val.Message(), ""); err != nil {
+ return err
+ }
+
+ default:
+ panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
+ }
+ return nil
+}
+
+// marshalList marshals the given protoreflect.List.
+func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error {
+ e.StartArray()
+ defer e.EndArray()
+
+ for i := 0; i < list.Len(); i++ {
+ item := list.Get(i)
+ if err := e.marshalSingular(item, fd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// marshalMap marshals the given protoreflect.Map.
+func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error {
+ e.StartObject()
+ defer e.EndObject()
+
+ var err error
+ order.RangeEntries(mmap, order.GenericKeyOrder, func(k pref.MapKey, v pref.Value) bool {
+ if err = e.WriteName(k.String()); err != nil {
+ return false
+ }
+ if err = e.marshalSingular(v, fd.MapValue()); err != nil {
+ return false
+ }
+ return true
+ })
+ return err
+}
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
new file mode 100644
index 000000000..72924a905
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -0,0 +1,889 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protojson
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+
+ "google.golang.org/protobuf/internal/encoding/json"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type marshalFunc func(encoder, pref.Message) error
+
+// wellKnownTypeMarshaler returns a marshal function if the message type
+// has specialized serialization behavior. It returns nil otherwise.
+func wellKnownTypeMarshaler(name pref.FullName) marshalFunc {
+ if name.Parent() == genid.GoogleProtobuf_package {
+ switch name.Name() {
+ case genid.Any_message_name:
+ return encoder.marshalAny
+ case genid.Timestamp_message_name:
+ return encoder.marshalTimestamp
+ case genid.Duration_message_name:
+ return encoder.marshalDuration
+ case genid.BoolValue_message_name,
+ genid.Int32Value_message_name,
+ genid.Int64Value_message_name,
+ genid.UInt32Value_message_name,
+ genid.UInt64Value_message_name,
+ genid.FloatValue_message_name,
+ genid.DoubleValue_message_name,
+ genid.StringValue_message_name,
+ genid.BytesValue_message_name:
+ return encoder.marshalWrapperType
+ case genid.Struct_message_name:
+ return encoder.marshalStruct
+ case genid.ListValue_message_name:
+ return encoder.marshalListValue
+ case genid.Value_message_name:
+ return encoder.marshalKnownValue
+ case genid.FieldMask_message_name:
+ return encoder.marshalFieldMask
+ case genid.Empty_message_name:
+ return encoder.marshalEmpty
+ }
+ }
+ return nil
+}
+
+type unmarshalFunc func(decoder, pref.Message) error
+
+// wellKnownTypeUnmarshaler returns an unmarshal function if the message type
+// has specialized serialization behavior. It returns nil otherwise.
+func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc {
+ if name.Parent() == genid.GoogleProtobuf_package {
+ switch name.Name() {
+ case genid.Any_message_name:
+ return decoder.unmarshalAny
+ case genid.Timestamp_message_name:
+ return decoder.unmarshalTimestamp
+ case genid.Duration_message_name:
+ return decoder.unmarshalDuration
+ case genid.BoolValue_message_name,
+ genid.Int32Value_message_name,
+ genid.Int64Value_message_name,
+ genid.UInt32Value_message_name,
+ genid.UInt64Value_message_name,
+ genid.FloatValue_message_name,
+ genid.DoubleValue_message_name,
+ genid.StringValue_message_name,
+ genid.BytesValue_message_name:
+ return decoder.unmarshalWrapperType
+ case genid.Struct_message_name:
+ return decoder.unmarshalStruct
+ case genid.ListValue_message_name:
+ return decoder.unmarshalListValue
+ case genid.Value_message_name:
+ return decoder.unmarshalKnownValue
+ case genid.FieldMask_message_name:
+ return decoder.unmarshalFieldMask
+ case genid.Empty_message_name:
+ return decoder.unmarshalEmpty
+ }
+ }
+ return nil
+}
+
+// The JSON representation of an Any message uses the regular representation of
+// the deserialized, embedded message, with an additional field `@type` which
+// contains the type URL. If the embedded message type is well-known and has a
+// custom JSON representation, that representation is embedded by adding a
+// field `value` which holds the custom JSON in addition to the `@type` field.
+
+func (e encoder) marshalAny(m pref.Message) error {
+ fds := m.Descriptor().Fields()
+ fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
+ fdValue := fds.ByNumber(genid.Any_Value_field_number)
+
+ if !m.Has(fdType) {
+ if !m.Has(fdValue) {
+ // If message is empty, marshal out empty JSON object.
+ e.StartObject()
+ e.EndObject()
+ return nil
+ } else {
+ // Return an error if the type_url field is not set but value is set.
+ return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name)
+ }
+ }
+
+ typeVal := m.Get(fdType)
+ valueVal := m.Get(fdValue)
+
+ // Resolve the type in order to unmarshal value field.
+ typeURL := typeVal.String()
+ emt, err := e.opts.Resolver.FindMessageByURL(typeURL)
+ if err != nil {
+ return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err)
+ }
+
+ em := emt.New()
+ err = proto.UnmarshalOptions{
+ AllowPartial: true, // never check required fields inside an Any
+ Resolver: e.opts.Resolver,
+ }.Unmarshal(valueVal.Bytes(), em.Interface())
+ if err != nil {
+ return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err)
+ }
+
+ // If type of value has custom JSON encoding, marshal out a field "value"
+ // with corresponding custom JSON encoding of the embedded message as a
+ // field.
+ if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil {
+ e.StartObject()
+ defer e.EndObject()
+
+ // Marshal out @type field.
+ e.WriteName("@type")
+ if err := e.WriteString(typeURL); err != nil {
+ return err
+ }
+
+ e.WriteName("value")
+ return marshal(e, em)
+ }
+
+ // Else, marshal out the embedded message's fields in this Any object.
+ if err := e.marshalMessage(em, typeURL); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (d decoder) unmarshalAny(m pref.Message) error {
+ // Peek to check for json.ObjectOpen to avoid advancing a read.
+ start, err := d.Peek()
+ if err != nil {
+ return err
+ }
+ if start.Kind() != json.ObjectOpen {
+ return d.unexpectedTokenError(start)
+ }
+
+ // Use another decoder to parse the unread bytes for the @type field. This
+ // avoids advancing a read on the current decoder because the current JSON
+ // object may contain the fields of the embedded type.
+ dec := decoder{d.Clone(), UnmarshalOptions{}}
+ tok, err := findTypeURL(dec)
+ switch err {
+ case errEmptyObject:
+ // An empty JSON object translates to an empty Any message.
+ d.Read() // Read json.ObjectOpen.
+ d.Read() // Read json.ObjectClose.
+ return nil
+
+ case errMissingType:
+ if d.opts.DiscardUnknown {
+ // Treat all fields as unknowns, similar to an empty object.
+ return d.skipJSONValue()
+ }
+ // Use start.Pos() for line position.
+ return d.newError(start.Pos(), err.Error())
+
+ default:
+ if err != nil {
+ return err
+ }
+ }
+
+ typeURL := tok.ParsedString()
+ emt, err := d.opts.Resolver.FindMessageByURL(typeURL)
+ if err != nil {
+ return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err)
+ }
+
+ // Create new message for the embedded message type and unmarshal into it.
+ em := emt.New()
+ if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil {
+ // If embedded message is a custom type,
+ // unmarshal the JSON "value" field into it.
+ if err := d.unmarshalAnyValue(unmarshal, em); err != nil {
+ return err
+ }
+ } else {
+ // Else unmarshal the current JSON object into it.
+ if err := d.unmarshalMessage(em, true); err != nil {
+ return err
+ }
+ }
+ // Serialize the embedded message and assign the resulting bytes to the
+ // proto value field.
+ b, err := proto.MarshalOptions{
+ AllowPartial: true, // No need to check required fields inside an Any.
+ Deterministic: true,
+ }.Marshal(em.Interface())
+ if err != nil {
+ return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err)
+ }
+
+ fds := m.Descriptor().Fields()
+ fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
+ fdValue := fds.ByNumber(genid.Any_Value_field_number)
+
+ m.Set(fdType, pref.ValueOfString(typeURL))
+ m.Set(fdValue, pref.ValueOfBytes(b))
+ return nil
+}
+
+var errEmptyObject = fmt.Errorf(`empty object`)
+var errMissingType = fmt.Errorf(`missing "@type" field`)
+
+// findTypeURL returns the token for the "@type" field value from the given
+// JSON bytes. It is expected that the given bytes start with json.ObjectOpen.
+// It returns errEmptyObject if the JSON object is empty or errMissingType if
+// @type field does not exist. It returns any other error if the @type field is
+// not valid or there are other decoding issues.
+func findTypeURL(d decoder) (json.Token, error) {
+ var typeURL string
+ var typeTok json.Token
+ numFields := 0
+ // Skip start object.
+ d.Read()
+
+Loop:
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return json.Token{}, err
+ }
+
+ switch tok.Kind() {
+ case json.ObjectClose:
+ if typeURL == "" {
+ // Did not find @type field.
+ if numFields > 0 {
+ return json.Token{}, errMissingType
+ }
+ return json.Token{}, errEmptyObject
+ }
+ break Loop
+
+ case json.Name:
+ numFields++
+ if tok.Name() != "@type" {
+ // Skip value.
+ if err := d.skipJSONValue(); err != nil {
+ return json.Token{}, err
+ }
+ continue
+ }
+
+ // Return an error if this field was already set.
+ if typeURL != "" {
+ return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`)
+ }
+ // Read field value.
+ tok, err := d.Read()
+ if err != nil {
+ return json.Token{}, err
+ }
+ if tok.Kind() != json.String {
+ return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString())
+ }
+ typeURL = tok.ParsedString()
+ if typeURL == "" {
+ return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`)
+ }
+ typeTok = tok
+ }
+ }
+
+ return typeTok, nil
+}
+
+// skipJSONValue parses a JSON value (null, boolean, string, number, object and
+// array) in order to advance the read to the next JSON value. It relies on
+// the decoder returning an error if the types are not in valid sequence.
+func (d decoder) skipJSONValue() error {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ // Only need to continue reading for objects and arrays.
+ switch tok.Kind() {
+ case json.ObjectOpen:
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case json.ObjectClose:
+ return nil
+ case json.Name:
+ // Skip object field value.
+ if err := d.skipJSONValue(); err != nil {
+ return err
+ }
+ }
+ }
+
+ case json.ArrayOpen:
+ for {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case json.ArrayClose:
+ d.Read()
+ return nil
+ default:
+ // Skip array item.
+ if err := d.skipJSONValue(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// unmarshalAnyValue unmarshals the given custom-type message from the JSON
+// object's "value" field.
+func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) error {
+ // Skip ObjectOpen, and start reading the fields.
+ d.Read()
+
+ var found bool // Used for detecting duplicate "value".
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case json.ObjectClose:
+ if !found {
+ return d.newError(tok.Pos(), `missing "value" field`)
+ }
+ return nil
+
+ case json.Name:
+ switch tok.Name() {
+ case "@type":
+ // Skip the value as this was previously parsed already.
+ d.Read()
+
+ case "value":
+ if found {
+ return d.newError(tok.Pos(), `duplicate "value" field`)
+ }
+ // Unmarshal the field value into the given message.
+ if err := unmarshal(d, m); err != nil {
+ return err
+ }
+ found = true
+
+ default:
+ if d.opts.DiscardUnknown {
+ if err := d.skipJSONValue(); err != nil {
+ return err
+ }
+ continue
+ }
+ return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
+ }
+ }
+ }
+}
+
+// Wrapper types are encoded as JSON primitives like string, number or boolean.
+
+func (e encoder) marshalWrapperType(m pref.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
+ val := m.Get(fd)
+ return e.marshalSingular(val, fd)
+}
+
+func (d decoder) unmarshalWrapperType(m pref.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
+ val, err := d.unmarshalScalar(fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, val)
+ return nil
+}
+
+// The JSON representation for Empty is an empty JSON object.
+
+func (e encoder) marshalEmpty(pref.Message) error {
+ e.StartObject()
+ e.EndObject()
+ return nil
+}
+
+func (d decoder) unmarshalEmpty(pref.Message) error {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ if tok.Kind() != json.ObjectOpen {
+ return d.unexpectedTokenError(tok)
+ }
+
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case json.ObjectClose:
+ return nil
+
+ case json.Name:
+ if d.opts.DiscardUnknown {
+ if err := d.skipJSONValue(); err != nil {
+ return err
+ }
+ continue
+ }
+ return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
+
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+ }
+}
+
+// The JSON representation for Struct is a JSON object that contains the encoded
+// Struct.fields map and follows the serialization rules for a map.
+
+func (e encoder) marshalStruct(m pref.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
+ return e.marshalMap(m.Get(fd).Map(), fd)
+}
+
+func (d decoder) unmarshalStruct(m pref.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
+ return d.unmarshalMap(m.Mutable(fd).Map(), fd)
+}
+
+// The JSON representation for ListValue is a JSON array that contains the
+// encoded ListValue.values repeated field and follows the serialization rules
+// for a repeated field.
+
+func (e encoder) marshalListValue(m pref.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
+ return e.marshalList(m.Get(fd).List(), fd)
+}
+
+func (d decoder) unmarshalListValue(m pref.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
+ return d.unmarshalList(m.Mutable(fd).List(), fd)
+}
+
+// The JSON representation for a Value depends on which oneof field is set.
+// Each of the fields in the oneof has its own custom serialization rule. A
+// Value message needs to have one of the oneof fields set; otherwise it is an
+// error.
+
+func (e encoder) marshalKnownValue(m pref.Message) error {
+ od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name)
+ fd := m.WhichOneof(od)
+ if fd == nil {
+ return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname)
+ }
+ if fd.Number() == genid.Value_NumberValue_field_number {
+ if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) {
+ return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v)
+ }
+ }
+ return e.marshalSingular(m.Get(fd), fd)
+}
+
+func (d decoder) unmarshalKnownValue(m pref.Message) error {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+
+ var fd pref.FieldDescriptor
+ var val pref.Value
+ switch tok.Kind() {
+ case json.Null:
+ d.Read()
+ fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number)
+ val = pref.ValueOfEnum(0)
+
+ case json.Bool:
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number)
+ val = pref.ValueOfBool(tok.Bool())
+
+ case json.Number:
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number)
+ var ok bool
+ val, ok = unmarshalFloat(tok, 64)
+ if !ok {
+ return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
+ }
+
+ case json.String:
+ // A JSON string may have been encoded from the number_value field,
+ // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows
+ // for it to be in JSON string form. Given this custom encoding spec,
+ // however, there is no way to identify that and hence a JSON string is
+ // always assigned to the string_value field, which means that certain
+ // encodings cannot be parsed back to the same field.
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number)
+ val = pref.ValueOfString(tok.ParsedString())
+
+ case json.ObjectOpen:
+ fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number)
+ val = m.NewField(fd)
+ if err := d.unmarshalStruct(val.Message()); err != nil {
+ return err
+ }
+
+ case json.ArrayOpen:
+ fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number)
+ val = m.NewField(fd)
+ if err := d.unmarshalListValue(val.Message()); err != nil {
+ return err
+ }
+
+ default:
+ return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
+ }
+
+ m.Set(fd, val)
+ return nil
+}
+
+// The JSON representation for a Duration is a JSON string that ends in the
+// suffix "s" (indicating seconds) and is preceded by the number of seconds,
+// with nanoseconds expressed as fractional seconds.
+//
+// Durations less than one second are represented with a 0 seconds field and a
+// positive or negative nanos field. For durations of one second or more, a
+// non-zero value for the nanos field must be of the same sign as the seconds
+// field.
+//
+// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive.
+// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive.
+
+const (
+ secondsInNanos = 999999999
+ maxSecondsInDuration = 315576000000
+)
+
+func (e encoder) marshalDuration(m pref.Message) error {
+ fds := m.Descriptor().Fields()
+ fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
+ fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
+
+ secsVal := m.Get(fdSeconds)
+ nanosVal := m.Get(fdNanos)
+ secs := secsVal.Int()
+ nanos := nanosVal.Int()
+ if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
+ return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs)
+ }
+ if nanos < -secondsInNanos || nanos > secondsInNanos {
+ return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos)
+ }
+ if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) {
+ return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname)
+ }
+ // Generated output always contains 0, 3, 6, or 9 fractional digits,
+ // depending on required precision, followed by the suffix "s".
+ var sign string
+ if secs < 0 || nanos < 0 {
+ sign, secs, nanos = "-", -1*secs, -1*nanos
+ }
+ x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos)
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
+ e.WriteString(x + "s")
+ return nil
+}
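+
+// Worked example of the trimming above (values assumed): seconds=3 and
+// nanos=500000000 format as "3.500000000", and the grouped 3-digit trims
+// leave "3.500s"; seconds=1 and nanos=0 trim all the way down to "1s". The
+// output therefore always carries 0, 3, 6, or 9 fractional digits.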
+
+func (d decoder) unmarshalDuration(m pref.Message) error {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ if tok.Kind() != json.String {
+ return d.unexpectedTokenError(tok)
+ }
+
+ secs, nanos, ok := parseDuration(tok.ParsedString())
+ if !ok {
+ return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString())
+ }
+ // Validate seconds. No need to validate nanos because parseDuration would
+ // have covered that already.
+ if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
+ return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString())
+ }
+
+ fds := m.Descriptor().Fields()
+ fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
+ fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
+
+ m.Set(fdSeconds, pref.ValueOfInt64(secs))
+ m.Set(fdNanos, pref.ValueOfInt32(nanos))
+ return nil
+}
+
+// parseDuration parses the given input string for the seconds and nanoseconds
+// values of the Duration JSON format. The format is a decimal number with the
+// suffix 's'. It can have an optional plus/minus sign. There needs to be at least an
+// integer or fractional part. Fractional part is limited to 9 digits only for
+// nanoseconds precision, regardless of whether there are trailing zero digits.
+// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s.
+func parseDuration(input string) (int64, int32, bool) {
+ b := []byte(input)
+ size := len(b)
+ if size < 2 {
+ return 0, 0, false
+ }
+ if b[size-1] != 's' {
+ return 0, 0, false
+ }
+ b = b[:size-1]
+
+ // Read optional plus/minus symbol.
+ var neg bool
+ switch b[0] {
+ case '-':
+ neg = true
+ b = b[1:]
+ case '+':
+ b = b[1:]
+ }
+ if len(b) == 0 {
+ return 0, 0, false
+ }
+
+ // Read the integer part.
+ var intp []byte
+ switch {
+ case b[0] == '0':
+ b = b[1:]
+
+ case '1' <= b[0] && b[0] <= '9':
+ intp = b[0:]
+ b = b[1:]
+ n := 1
+ for len(b) > 0 && '0' <= b[0] && b[0] <= '9' {
+ n++
+ b = b[1:]
+ }
+ intp = intp[:n]
+
+ case b[0] == '.':
+ // Continue below.
+
+ default:
+ return 0, 0, false
+ }
+
+ hasFrac := false
+ var frac [9]byte
+ if len(b) > 0 {
+ if b[0] != '.' {
+ return 0, 0, false
+ }
+ // Read the fractional part.
+ b = b[1:]
+ n := 0
+ for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' {
+ frac[n] = b[0]
+ n++
+ b = b[1:]
+ }
+ // It is not valid if there are more bytes left.
+ if len(b) > 0 {
+ return 0, 0, false
+ }
+ // Pad fractional part with 0s.
+ for i := n; i < 9; i++ {
+ frac[i] = '0'
+ }
+ hasFrac = true
+ }
+
+ var secs int64
+ if len(intp) > 0 {
+ var err error
+ secs, err = strconv.ParseInt(string(intp), 10, 64)
+ if err != nil {
+ return 0, 0, false
+ }
+ }
+
+ var nanos int64
+ if hasFrac {
+ nanob := bytes.TrimLeft(frac[:], "0")
+ if len(nanob) > 0 {
+ var err error
+ nanos, err = strconv.ParseInt(string(nanob), 10, 32)
+ if err != nil {
+ return 0, 0, false
+ }
+ }
+ }
+
+ if neg {
+ if secs > 0 {
+ secs = -secs
+ }
+ if nanos > 0 {
+ nanos = -nanos
+ }
+ }
+ return secs, int32(nanos), true
+}
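+
+// A few worked inputs (assumed, matching the format described above):
+//
+//    parseDuration("1.5s")          // secs=1, nanos=500000000, ok=true
+//    parseDuration("-.5s")          // secs=0, nanos=-500000000, ok=true
+//    parseDuration("1.0000000001s") // ok=false: more than 9 fractional digits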
+
+// The JSON representation for a Timestamp is a JSON string in the RFC 3339
+// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where
+// {year} is always expressed using four digits while {month}, {day}, {hour},
+// {min}, and {sec} are zero-padded to two digits each. The fractional seconds,
+// which can go up to 9 digits (up to 1 nanosecond resolution), are optional. The
+// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding
+// should always use UTC (as indicated by "Z") and a decoder should be able to
+// accept both UTC and other timezones (as indicated by an offset).
+//
+// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z
+// inclusive.
+// Timestamp.nanos must be from 0 to 999,999,999 inclusive.
+
+const (
+ maxTimestampSeconds = 253402300799
+ minTimestampSeconds = -62135596800
+)
+
+func (e encoder) marshalTimestamp(m pref.Message) error {
+ fds := m.Descriptor().Fields()
+ fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
+ fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
+
+ secsVal := m.Get(fdSeconds)
+ nanosVal := m.Get(fdNanos)
+ secs := secsVal.Int()
+ nanos := nanosVal.Int()
+ if secs < minTimestampSeconds || secs > maxTimestampSeconds {
+ return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs)
+ }
+ if nanos < 0 || nanos > secondsInNanos {
+ return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos)
+ }
+ // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3,
+ // 6 or 9 fractional digits.
+ t := time.Unix(secs, nanos).UTC()
+ x := t.Format("2006-01-02T15:04:05.000000000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
+ e.WriteString(x + "Z")
+ return nil
+}
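+
+// Worked example (values assumed): seconds=0 and nanos=0 format as
+// "1970-01-01T00:00:00.000000000", which the trims reduce to
+// "1970-01-01T00:00:00Z"; nanos=500000000 would instead keep three
+// fractional digits, "1970-01-01T00:00:00.500Z".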
+
+func (d decoder) unmarshalTimestamp(m pref.Message) error {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ if tok.Kind() != json.String {
+ return d.unexpectedTokenError(tok)
+ }
+
+ t, err := time.Parse(time.RFC3339Nano, tok.ParsedString())
+ if err != nil {
+ return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
+ }
+ // Validate seconds. No need to validate nanos because time.Parse would have
+ // covered that already.
+ secs := t.Unix()
+ if secs < minTimestampSeconds || secs > maxTimestampSeconds {
+ return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString())
+ }
+
+ fds := m.Descriptor().Fields()
+ fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
+ fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
+
+ m.Set(fdSeconds, pref.ValueOfInt64(secs))
+ m.Set(fdNanos, pref.ValueOfInt32(int32(t.Nanosecond())))
+ return nil
+}
+
+// The JSON representation for a FieldMask is a JSON string where paths are
+// separated by a comma. Fields name in each path are converted to/from
+// lower-camel naming conventions. Encoding should fail if the path name would
+// end up differently after a round-trip.
+
+func (e encoder) marshalFieldMask(m pref.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
+ list := m.Get(fd).List()
+ paths := make([]string, 0, list.Len())
+
+ for i := 0; i < list.Len(); i++ {
+ s := list.Get(i).String()
+ if !pref.FullName(s).IsValid() {
+ return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s)
+ }
+ // Return error if conversion to camelCase is not reversible.
+ cc := strs.JSONCamelCase(s)
+ if s != strs.JSONSnakeCase(cc) {
+ return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s)
+ }
+ paths = append(paths, cc)
+ }
+
+ e.WriteString(strings.Join(paths, ","))
+ return nil
+}
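+
+// Round-trip sketch (paths assumed): ["foo_bar", "baz"] marshals to the JSON
+// string "fooBar,baz", while a path like "foo_3_bar" is rejected because
+// JSONSnakeCase(JSONCamelCase("foo_3_bar")) yields "foo3_bar", not the input.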
+
+func (d decoder) unmarshalFieldMask(m pref.Message) error {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ if tok.Kind() != json.String {
+ return d.unexpectedTokenError(tok)
+ }
+ str := strings.TrimSpace(tok.ParsedString())
+ if str == "" {
+ return nil
+ }
+ paths := strings.Split(str, ",")
+
+ fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
+ list := m.Mutable(fd).List()
+
+ for _, s0 := range paths {
+ s := strs.JSONSnakeCase(s0)
+ if strings.Contains(s0, "_") || !pref.FullName(s).IsValid() {
+ return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0)
+ }
+ list.Append(pref.ValueOfString(s))
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
new file mode 100644
index 000000000..b13fd29e8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
@@ -0,0 +1,340 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/errors"
+)
+
+// call specifies which Decoder method was invoked.
+type call uint8
+
+const (
+ readCall call = iota
+ peekCall
+)
+
+const unexpectedFmt = "unexpected token %s"
+
+// ErrUnexpectedEOF means that EOF was encountered in the middle of the input.
+var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF)
+
+// Decoder is a token-based JSON decoder.
+type Decoder struct {
+ // lastCall is last method called, either readCall or peekCall.
+ // Initial value is readCall.
+ lastCall call
+
+ // lastToken contains the last read token.
+ lastToken Token
+
+ // lastErr contains the last read error.
+ lastErr error
+
+ // openStack is a stack containing ObjectOpen and ArrayOpen values. The
+ // top of stack represents the object or the array the current value is
+ // directly located in.
+ openStack []Kind
+
+ // orig is used in reporting line and column.
+ orig []byte
+ // in contains the unconsumed input.
+ in []byte
+}
+
+// NewDecoder returns a Decoder to read the given []byte.
+func NewDecoder(b []byte) *Decoder {
+ return &Decoder{orig: b, in: b}
+}
+
+// Peek looks ahead and returns the next token kind without advancing a read.
+func (d *Decoder) Peek() (Token, error) {
+ defer func() { d.lastCall = peekCall }()
+ if d.lastCall == readCall {
+ d.lastToken, d.lastErr = d.Read()
+ }
+ return d.lastToken, d.lastErr
+}
+
+// Read returns the next JSON token.
+// It will return an error if there is no valid token.
+func (d *Decoder) Read() (Token, error) {
+ const scalar = Null | Bool | Number | String
+
+ defer func() { d.lastCall = readCall }()
+ if d.lastCall == peekCall {
+ return d.lastToken, d.lastErr
+ }
+
+ tok, err := d.parseNext()
+ if err != nil {
+ return Token{}, err
+ }
+
+ switch tok.kind {
+ case EOF:
+ if len(d.openStack) != 0 ||
+ d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 {
+ return Token{}, ErrUnexpectedEOF
+ }
+
+ case Null:
+ if !d.isValueNext() {
+ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+ }
+
+ case Bool, Number:
+ if !d.isValueNext() {
+ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+ }
+
+ case String:
+ if d.isValueNext() {
+ break
+ }
+ // This string token should only be for a field name.
+ if d.lastToken.kind&(ObjectOpen|comma) == 0 {
+ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+ }
+ if len(d.in) == 0 {
+ return Token{}, ErrUnexpectedEOF
+ }
+ if c := d.in[0]; c != ':' {
+ return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c))
+ }
+ tok.kind = Name
+ d.consume(1)
+
+ case ObjectOpen, ArrayOpen:
+ if !d.isValueNext() {
+ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+ }
+ d.openStack = append(d.openStack, tok.kind)
+
+ case ObjectClose:
+ if len(d.openStack) == 0 ||
+ d.lastToken.kind == comma ||
+ d.openStack[len(d.openStack)-1] != ObjectOpen {
+ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+ }
+ d.openStack = d.openStack[:len(d.openStack)-1]
+
+ case ArrayClose:
+ if len(d.openStack) == 0 ||
+ d.lastToken.kind == comma ||
+ d.openStack[len(d.openStack)-1] != ArrayOpen {
+ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+ }
+ d.openStack = d.openStack[:len(d.openStack)-1]
+
+ case comma:
+ if len(d.openStack) == 0 ||
+ d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 {
+ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+ }
+ }
+
+ // Update d.lastToken only after validating that the token is in the right sequence.
+ d.lastToken = tok
+
+ if d.lastToken.kind == comma {
+ return d.Read()
+ }
+ return tok, nil
+}
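+
+// Peek/Read interplay, sketched: Peek performs the read once and caches the
+// token, and the following Read returns the cached result without reparsing:
+//
+//    d := NewDecoder([]byte("true"))
+//    tok, _ := d.Peek() // Bool token; input is already consumed internally
+//    tok, _ = d.Read()  // same token, no second parse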
+
+// Any sequence that looks like a non-delimiter (for error reporting).
+var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`)
+
+// parseNext parses the next JSON token. It returns a Token object for
+// different types, except for Name. It does not handle whether the next token
+// is in a valid sequence or not.
+func (d *Decoder) parseNext() (Token, error) {
+ // Trim leading spaces.
+ d.consume(0)
+
+ in := d.in
+ if len(in) == 0 {
+ return d.consumeToken(EOF, 0), nil
+ }
+
+ switch in[0] {
+ case 'n':
+ if n := matchWithDelim("null", in); n != 0 {
+ return d.consumeToken(Null, n), nil
+ }
+
+ case 't':
+ if n := matchWithDelim("true", in); n != 0 {
+ return d.consumeBoolToken(true, n), nil
+ }
+
+ case 'f':
+ if n := matchWithDelim("false", in); n != 0 {
+ return d.consumeBoolToken(false, n), nil
+ }
+
+ case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ if n, ok := parseNumber(in); ok {
+ return d.consumeToken(Number, n), nil
+ }
+
+ case '"':
+ s, n, err := d.parseString(in)
+ if err != nil {
+ return Token{}, err
+ }
+ return d.consumeStringToken(s, n), nil
+
+ case '{':
+ return d.consumeToken(ObjectOpen, 1), nil
+
+ case '}':
+ return d.consumeToken(ObjectClose, 1), nil
+
+ case '[':
+ return d.consumeToken(ArrayOpen, 1), nil
+
+ case ']':
+ return d.consumeToken(ArrayClose, 1), nil
+
+ case ',':
+ return d.consumeToken(comma, 1), nil
+ }
+ return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in))
+}
+
+// newSyntaxError returns an error with line and column information useful for
+// syntax errors.
+func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error {
+ e := errors.New(f, x...)
+ line, column := d.Position(pos)
+ return errors.New("syntax error (line %d:%d): %v", line, column, e)
+}
+
+// Position returns the line and column number of the given index in the original input.
+// It will panic if index is out of range.
+func (d *Decoder) Position(idx int) (line int, column int) {
+ b := d.orig[:idx]
+ line = bytes.Count(b, []byte("\n")) + 1
+ if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
+ b = b[i+1:]
+ }
+ column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
+ return line, column
+}
+
+// currPos returns the current index position of d.in from d.orig.
+func (d *Decoder) currPos() int {
+ return len(d.orig) - len(d.in)
+}
+
+// matchWithDelim matches s with the input b and verifies that the match
+// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]").
+// As a special case, EOF is considered a delimiter. It returns the length of s
+// if there is a match, else 0.
+func matchWithDelim(s string, b []byte) int {
+ if !bytes.HasPrefix(b, []byte(s)) {
+ return 0
+ }
+
+ n := len(s)
+ if n < len(b) && isNotDelim(b[n]) {
+ return 0
+ }
+ return n
+}
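+
+// Illustrative calls (derived from the rules above):
+//
+//    matchWithDelim("null", []byte("null,")) // 4: ',' is a delimiter
+//    matchWithDelim("null", []byte("nullx")) // 0: 'x' is not a delimiter
+//    matchWithDelim("null", []byte("null"))  // 4: EOF counts as a delimiter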
+
+// isNotDelim returns true if the given byte is not a delimiter character.
+func isNotDelim(c byte) bool {
+ return (c == '-' || c == '+' || c == '.' || c == '_' ||
+ ('a' <= c && c <= 'z') ||
+ ('A' <= c && c <= 'Z') ||
+ ('0' <= c && c <= '9'))
+}
+
+// consume consumes n bytes of input and any subsequent whitespace.
+func (d *Decoder) consume(n int) {
+ d.in = d.in[n:]
+ for len(d.in) > 0 {
+ switch d.in[0] {
+ case ' ', '\n', '\r', '\t':
+ d.in = d.in[1:]
+ default:
+ return
+ }
+ }
+}
+
+// isValueNext returns true if the next type should be a JSON value: Null,
+// Number, String or Bool.
+func (d *Decoder) isValueNext() bool {
+ if len(d.openStack) == 0 {
+ return d.lastToken.kind == 0
+ }
+
+ start := d.openStack[len(d.openStack)-1]
+ switch start {
+ case ObjectOpen:
+ return d.lastToken.kind&Name != 0
+ case ArrayOpen:
+ return d.lastToken.kind&(ArrayOpen|comma) != 0
+ }
+ panic(fmt.Sprintf(
+ "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v",
+ d.lastToken.kind, start))
+}
+
+// consumeToken constructs a Token for the given Kind with its raw value derived
+// from the current d.in and the given size, and then consumes size bytes of it.
+func (d *Decoder) consumeToken(kind Kind, size int) Token {
+ tok := Token{
+ kind: kind,
+ raw: d.in[:size],
+ pos: len(d.orig) - len(d.in),
+ }
+ d.consume(size)
+ return tok
+}
+
+// consumeBoolToken constructs a Token for a Bool kind with raw value derived from
+// current d.in and given size.
+func (d *Decoder) consumeBoolToken(b bool, size int) Token {
+ tok := Token{
+ kind: Bool,
+ raw: d.in[:size],
+ pos: len(d.orig) - len(d.in),
+ boo: b,
+ }
+ d.consume(size)
+ return tok
+}
+
+// consumeStringToken constructs a Token for a String kind with raw value derived
+// from current d.in and given size.
+func (d *Decoder) consumeStringToken(s string, size int) Token {
+ tok := Token{
+ kind: String,
+ raw: d.in[:size],
+ pos: len(d.orig) - len(d.in),
+ str: s,
+ }
+ d.consume(size)
+ return tok
+}
+
+// Clone returns a copy of the Decoder for use in reading ahead the next JSON
+// object, array, or other value without affecting the current Decoder.
+func (d *Decoder) Clone() *Decoder {
+ ret := *d
+ ret.openStack = append([]Kind(nil), ret.openStack...)
+ return &ret
+}
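As a sketch of the read-ahead pattern this enables — assuming the exported Read method defined earlier in decode.go, outside this hunk — a caller can probe upcoming tokens on a clone without disturbing the original:

    // Hypothetical helper: peek whether the next value is an object.
    func nextIsObject(d *Decoder) bool {
    	c := d.Clone()       // openStack is deep-copied, so reads on c leave d untouched
    	tok, err := c.Read() // assumed: exported Read from earlier in this file
    	return err == nil && tok.Kind() == ObjectOpen
    }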
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
new file mode 100644
index 000000000..2999d7133
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
@@ -0,0 +1,254 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "strconv"
+)
+
+// parseNumber reads the given []byte for a valid JSON number. If it is valid,
+// it returns the number of bytes consumed. The parsing logic follows the
+// definition in https://tools.ietf.org/html/rfc7159#section-6 and is based on
+// the encoding/json.isValidNumber function.
+func parseNumber(input []byte) (int, bool) {
+ var n int
+
+ s := input
+ if len(s) == 0 {
+ return 0, false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ s = s[1:]
+ n++
+ if len(s) == 0 {
+ return 0, false
+ }
+ }
+
+ // Digits
+ switch {
+ case s[0] == '0':
+ s = s[1:]
+ n++
+
+ case '1' <= s[0] && s[0] <= '9':
+ s = s[1:]
+ n++
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+
+ default:
+ return 0, false
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ s = s[2:]
+ n += 2
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ n++
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ n++
+ if len(s) == 0 {
+ return 0, false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ }
+
+ // Check that next byte is a delimiter or it is at the end.
+ if n < len(input) && isNotDelim(input[n]) {
+ return 0, false
+ }
+
+ return n, true
+}
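A few inputs and the (n, ok) results this validator produces, written as a hypothetical in-package test:

    func TestParseNumber(t *testing.T) {
    	cases := []struct {
    		in string
    		n  int
    		ok bool
    	}{
    		{"0", 1, true},
    		{"-12.5e+3", 8, true},
    		{"10,", 2, true}, // ',' is a delimiter, so the prefix "10" matches
    		{"01", 0, false}, // a leading zero may not be followed by a digit
    		{"1.", 0, false}, // '.' must be followed by a digit
    		{"2x", 0, false}, // runs into a non-delimiter byte
    	}
    	for _, c := range cases {
    		if n, ok := parseNumber([]byte(c.in)); n != c.n || ok != c.ok {
    			t.Errorf("parseNumber(%q) = (%d, %v), want (%d, %v)", c.in, n, ok, c.n, c.ok)
    		}
    	}
    }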
+
+// numberParts is the result of parsing out a valid JSON number. It contains
+// the parts of a number. The parts are used for integer conversion.
+type numberParts struct {
+ neg bool
+ intp []byte
+ frac []byte
+ exp []byte
+}
+
+// parseNumberParts constructs numberParts from the given []byte. The logic here
+// is similar to parseNumber above, with the difference of having to construct
+// numberParts. The slice fields in numberParts are subslices of the input.
+func parseNumberParts(input []byte) (numberParts, bool) {
+ var neg bool
+ var intp []byte
+ var frac []byte
+ var exp []byte
+
+ s := input
+ if len(s) == 0 {
+ return numberParts{}, false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ neg = true
+ s = s[1:]
+ if len(s) == 0 {
+ return numberParts{}, false
+ }
+ }
+
+ // Digits
+ switch {
+ case s[0] == '0':
+ // Skip first 0 and no need to store.
+ s = s[1:]
+
+ case '1' <= s[0] && s[0] <= '9':
+ intp = s
+ n := 1
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ intp = intp[:n]
+
+ default:
+ return numberParts{}, false
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ frac = s[1:]
+ n := 1
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ frac = frac[:n]
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ exp = s
+ n := 0
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ n++
+ if len(s) == 0 {
+ return numberParts{}, false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ exp = exp[:n]
+ }
+
+ return numberParts{
+ neg: neg,
+ intp: intp,
+ frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right.
+ exp: exp,
+ }, true
+}
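For example, the parts extracted from "-1.250e2" (hypothetical in-package check; note the fraction's trailing zero is trimmed):

    p, ok := parseNumberParts([]byte("-1.250e2"))
    // ok == true, p.neg == true
    // p.intp == []byte("1"), p.frac == []byte("25"), p.exp == []byte("2")
    _, _ = p, ok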
+
+// normalizeToIntString returns an integer string in normal form without
+// E-notation for the given numberParts. It returns false if the number is not
+// an integer or if the exponent exceeds the max/min int value.
+func normalizeToIntString(n numberParts) (string, bool) {
+ intpSize := len(n.intp)
+ fracSize := len(n.frac)
+
+ if intpSize == 0 && fracSize == 0 {
+ return "0", true
+ }
+
+ var exp int
+ if len(n.exp) > 0 {
+ i, err := strconv.ParseInt(string(n.exp), 10, 32)
+ if err != nil {
+ return "", false
+ }
+ exp = int(i)
+ }
+
+ var num []byte
+ if exp >= 0 {
+ // For positive E, shift fraction digits into integer part and also pad
+ // with zeroes as needed.
+
+ // If there are more digits in fraction than the E value, then the
+ // number is not an integer.
+ if fracSize > exp {
+ return "", false
+ }
+
+ // Make sure resulting digits are within max value limit to avoid
+ // unnecessarily constructing a large byte slice that may simply fail
+ // later on.
+ const maxDigits = 20 // Max uint64 value has 20 decimal digits.
+ if intpSize+exp > maxDigits {
+ return "", false
+ }
+
+ // Set cap to make a copy of integer part when appended.
+ num = n.intp[:len(n.intp):len(n.intp)]
+ num = append(num, n.frac...)
+ for i := 0; i < exp-fracSize; i++ {
+ num = append(num, '0')
+ }
+ } else {
+ // For negative E, shift digits in integer part out.
+
+ // If there are fractions, then the number is not an integer.
+ if fracSize > 0 {
+ return "", false
+ }
+
+ // index is where the decimal point will be after adjusting for negative
+ // exponent.
+ index := intpSize + exp
+ if index < 0 {
+ return "", false
+ }
+
+ num = n.intp
+ // If any of the digits being shifted to the right of the decimal point
+ // is non-zero, then the number is not an integer.
+ for i := index; i < intpSize; i++ {
+ if num[i] != '0' {
+ return "", false
+ }
+ }
+ num = num[:index]
+ }
+
+ if n.neg {
+ return "-" + string(num), true
+ }
+ return string(num), true
+}
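Worked examples of the exponent shifting (hypothetical in-package checks):

    p, _ := parseNumberParts([]byte("-1.250e2"))
    s, ok := normalizeToIntString(p) // s == "-125", ok == true: fraction shifts left

    p, _ = parseNumberParts([]byte("100e-2"))
    s, ok = normalizeToIntString(p) // s == "1", ok == true: trailing zeros shift out

    p, _ = parseNumberParts([]byte("1.5"))
    s, ok = normalizeToIntString(p) // s == "", ok == false: fractional digits remain
    _, _ = s, ok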
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
new file mode 100644
index 000000000..f7fea7d8d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
@@ -0,0 +1,91 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/strs"
+)
+
+func (d *Decoder) parseString(in []byte) (string, int, error) {
+ in0 := in
+ if len(in) == 0 {
+ return "", 0, ErrUnexpectedEOF
+ }
+ if in[0] != '"' {
+ return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0])
+ }
+ in = in[1:]
+ i := indexNeedEscapeInBytes(in)
+ in, out := in[i:], in[:i:i] // set cap to prevent mutations
+ for len(in) > 0 {
+ switch r, n := utf8.DecodeRune(in); {
+ case r == utf8.RuneError && n == 1:
+ return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string")
+ case r < ' ':
+ return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r)
+ case r == '"':
+ in = in[1:]
+ n := len(in0) - len(in)
+ return string(out), n, nil
+ case r == '\\':
+ if len(in) < 2 {
+ return "", 0, ErrUnexpectedEOF
+ }
+ switch r := in[1]; r {
+ case '"', '\\', '/':
+ in, out = in[2:], append(out, r)
+ case 'b':
+ in, out = in[2:], append(out, '\b')
+ case 'f':
+ in, out = in[2:], append(out, '\f')
+ case 'n':
+ in, out = in[2:], append(out, '\n')
+ case 'r':
+ in, out = in[2:], append(out, '\r')
+ case 't':
+ in, out = in[2:], append(out, '\t')
+ case 'u':
+ if len(in) < 6 {
+ return "", 0, ErrUnexpectedEOF
+ }
+ v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
+ if err != nil {
+ return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
+ }
+ in = in[6:]
+
+ r := rune(v)
+ if utf16.IsSurrogate(r) {
+ if len(in) < 6 {
+ return "", 0, ErrUnexpectedEOF
+ }
+ v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
+ r = utf16.DecodeRune(r, rune(v))
+ if in[0] != '\\' || in[1] != 'u' ||
+ r == unicode.ReplacementChar || err != nil {
+ return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
+ }
+ in = in[6:]
+ }
+ out = append(out, string(r)...)
+ default:
+ return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2])
+ }
+ default:
+ i := indexNeedEscapeInBytes(in[n:])
+ in, out = in[n+i:], append(out, in[:n+i]...)
+ }
+ }
+ return "", 0, ErrUnexpectedEOF
+}
+
+// indexNeedEscapeInBytes returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
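The \uXXXX branch above combines UTF-16 surrogate halves with utf16.DecodeRune; a standalone illustration of that step:

    package main

    import (
    	"fmt"
    	"unicode/utf16"
    )

    func main() {
    	// The JSON escape sequence "\uD83D\uDE00" is the surrogate pair for U+1F600.
    	r := utf16.DecodeRune(0xD83D, 0xDE00)
    	fmt.Printf("U+%04X %c\n", r, r) // U+1F600 😀
    }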
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
new file mode 100644
index 000000000..50578d659
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
@@ -0,0 +1,192 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
+// Kind represents a token kind expressible in the JSON format.
+type Kind uint16
+
+const (
+ Invalid Kind = (1 << iota) / 2
+ EOF
+ Null
+ Bool
+ Number
+ String
+ Name
+ ObjectOpen
+ ObjectClose
+ ArrayOpen
+ ArrayClose
+
+ // comma is only for parsing in between tokens and
+ // does not need to be exported.
+ comma
+)
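Note the (1 << iota) / 2 idiom: it makes Invalid equal 0 and every later Kind a distinct power of two (EOF=1, Null=2, Bool=4, ...), so sets of kinds compose by bitwise OR and membership is a mask test, as isValueNext does. An in-package sketch:

    wantValue := Null | Bool | Number | String
    isValue := wantValue&Bool != 0 // true: Bool is in the set
    _ = isValue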
+
+func (k Kind) String() string {
+ switch k {
+ case EOF:
+ return "eof"
+ case Null:
+ return "null"
+ case Bool:
+ return "bool"
+ case Number:
+ return "number"
+ case String:
+ return "string"
+ case ObjectOpen:
+ return "{"
+ case ObjectClose:
+ return "}"
+ case Name:
+ return "name"
+ case ArrayOpen:
+ return "["
+ case ArrayClose:
+ return "]"
+ case comma:
+ return ","
+ }
+ return "<invalid>"
+}
+
+// Token provides a parsed token kind and value.
+//
+// Values are provided by the different accessor methods. The accessor methods
+// Name, Bool, and ParsedString will panic if called on the wrong kind. The
+// Number kind has several accessor methods for converting to the appropriate
+// Go numeric type, and those methods have an ok return value instead.
+type Token struct {
+ // Token kind.
+ kind Kind
+ // pos provides the position of the token in the original input.
+ pos int
+ // raw bytes of the serialized token.
+ // This is a subslice into the original input.
+ raw []byte
+ // boo is the parsed boolean value.
+ boo bool
+ // str is the parsed string value.
+ str string
+}
+
+// Kind returns the token kind.
+func (t Token) Kind() Kind {
+ return t.kind
+}
+
+// RawString returns the raw, serialized token as a string.
+func (t Token) RawString() string {
+ return string(t.raw)
+}
+
+// Pos returns the token position from the input.
+func (t Token) Pos() int {
+ return t.pos
+}
+
+// Name returns the object name if token is Name, else it panics.
+func (t Token) Name() string {
+ if t.kind == Name {
+ return t.str
+ }
+ panic(fmt.Sprintf("Token is not a Name: %v", t.RawString()))
+}
+
+// Bool returns the bool value if token kind is Bool, else it panics.
+func (t Token) Bool() bool {
+ if t.kind == Bool {
+ return t.boo
+ }
+ panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString()))
+}
+
+// ParsedString returns the string value if the token kind is String, else it
+// panics.
+func (t Token) ParsedString() string {
+ if t.kind == String {
+ return t.str
+ }
+ panic(fmt.Sprintf("Token is not a String: %v", t.RawString()))
+}
+
+// Float returns the floating-point number if token kind is Number.
+//
+// The floating-point precision is specified by the bitSize parameter: 32 for
+// float32 or 64 for float64. If bitSize=32, the result still has type float64,
+// but it will be convertible to float32 without changing its value. It returns
+// false if the number exceeds the floating-point limits for the given bitSize.
+func (t Token) Float(bitSize int) (float64, bool) {
+ if t.kind != Number {
+ return 0, false
+ }
+ f, err := strconv.ParseFloat(t.RawString(), bitSize)
+ if err != nil {
+ return 0, false
+ }
+ return f, true
+}
+
+// Int returns the signed integer number if token is Number.
+//
+// The given bitSize specifies the integer type that the result must fit into.
+// It returns false if the number is not an integer value or if the result
+// exceeds the limits for the given bitSize.
+func (t Token) Int(bitSize int) (int64, bool) {
+ s, ok := t.getIntStr()
+ if !ok {
+ return 0, false
+ }
+ n, err := strconv.ParseInt(s, 10, bitSize)
+ if err != nil {
+ return 0, false
+ }
+ return n, true
+}
+
+// Uint returns the unsigned integer number if token is Number.
+//
+// The given bitSize specifies the unsigned integer type that the result must
+// fit into. It returns false if the number is not an unsigned integer value
+// or if the result exceeds the limits for the given bitSize.
+func (t Token) Uint(bitSize int) (uint64, bool) {
+ s, ok := t.getIntStr()
+ if !ok {
+ return 0, false
+ }
+ n, err := strconv.ParseUint(s, 10, bitSize)
+ if err != nil {
+ return 0, false
+ }
+ return n, true
+}
+
+func (t Token) getIntStr() (string, bool) {
+ if t.kind != Number {
+ return "", false
+ }
+ parts, ok := parseNumberParts(t.raw)
+ if !ok {
+ return "", false
+ }
+ return normalizeToIntString(parts)
+}
+
+// TokenEquals returns true if given Tokens are equal, else false.
+func TokenEquals(x, y Token) bool {
+ return x.kind == y.kind &&
+ x.pos == y.pos &&
+ bytes.Equal(x.raw, y.raw) &&
+ x.boo == y.boo &&
+ x.str == y.str
+}
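Tying the accessors back to the number normalization in decode_number.go — a sketch assuming the NewDecoder constructor and Read method from decode.go, outside this hunk:

    d := NewDecoder([]byte(`1e2`)) // assumed exported constructor
    tok, _ := d.Read()             // assumed exported Read
    n, ok := tok.Int(32)           // n == 100, ok == true: "1e2" normalizes to "100"
    f, _ := tok.Float(64)          // f == 100, parsed directly by strconv.ParseFloat
    _, _, _ = n, ok, f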
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
new file mode 100644
index 000000000..fbdf34873
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
@@ -0,0 +1,276 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "math"
+ "math/bits"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/detrand"
+ "google.golang.org/protobuf/internal/errors"
+)
+
+// kind represents an encoding type.
+type kind uint8
+
+const (
+ _ kind = (1 << iota) / 2
+ name
+ scalar
+ objectOpen
+ objectClose
+ arrayOpen
+ arrayClose
+)
+
+// Encoder provides methods to write out JSON constructs and values. The user is
+// responsible for producing valid sequences of JSON constructs and values.
+type Encoder struct {
+ indent string
+ lastKind kind
+ indents []byte
+ out []byte
+}
+
+// NewEncoder returns an Encoder.
+//
+// If indent is a non-empty string, it causes every entry for an Array or Object
+// to be preceded by the indent and trailed by a newline.
+func NewEncoder(indent string) (*Encoder, error) {
+ e := &Encoder{}
+ if len(indent) > 0 {
+ if strings.Trim(indent, " \t") != "" {
+ return nil, errors.New("indent may only be composed of space or tab characters")
+ }
+ e.indent = indent
+ }
+ return e, nil
+}
+
+// Bytes returns the content written so far.
+func (e *Encoder) Bytes() []byte {
+ return e.out
+}
+
+// WriteNull writes out the null value.
+func (e *Encoder) WriteNull() {
+ e.prepareNext(scalar)
+ e.out = append(e.out, "null"...)
+}
+
+// WriteBool writes out the given boolean value.
+func (e *Encoder) WriteBool(b bool) {
+ e.prepareNext(scalar)
+ if b {
+ e.out = append(e.out, "true"...)
+ } else {
+ e.out = append(e.out, "false"...)
+ }
+}
+
+// WriteString writes out the given string as a JSON string value. It returns
+// an error if the input string contains invalid UTF-8.
+func (e *Encoder) WriteString(s string) error {
+ e.prepareNext(scalar)
+ var err error
+ if e.out, err = appendString(e.out, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Sentinel error used for indicating invalid UTF-8.
+var errInvalidUTF8 = errors.New("invalid UTF-8")
+
+func appendString(out []byte, in string) ([]byte, error) {
+ out = append(out, '"')
+ i := indexNeedEscapeInString(in)
+ in, out = in[i:], append(out, in[:i]...)
+ for len(in) > 0 {
+ switch r, n := utf8.DecodeRuneInString(in); {
+ case r == utf8.RuneError && n == 1:
+ return out, errInvalidUTF8
+ case r < ' ' || r == '"' || r == '\\':
+ out = append(out, '\\')
+ switch r {
+ case '"', '\\':
+ out = append(out, byte(r))
+ case '\b':
+ out = append(out, 'b')
+ case '\f':
+ out = append(out, 'f')
+ case '\n':
+ out = append(out, 'n')
+ case '\r':
+ out = append(out, 'r')
+ case '\t':
+ out = append(out, 't')
+ default:
+ out = append(out, 'u')
+ out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...)
+ out = strconv.AppendUint(out, uint64(r), 16)
+ }
+ in = in[n:]
+ default:
+ i := indexNeedEscapeInString(in[n:])
+ in, out = in[n+i:], append(out, in[:n+i]...)
+ }
+ }
+ out = append(out, '"')
+ return out, nil
+}
+
+// indexNeedEscapeInString returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInString(s string) int {
+ for i, r := range s {
+ if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError {
+ return i
+ }
+ }
+ return len(s)
+}
+
+// WriteFloat writes out the given float as a JSON number value of the given bitSize.
+func (e *Encoder) WriteFloat(n float64, bitSize int) {
+ e.prepareNext(scalar)
+ e.out = appendFloat(e.out, n, bitSize)
+}
+
+// appendFloat formats the given float for the given bitSize and appends it to
+// the given []byte.
+func appendFloat(out []byte, n float64, bitSize int) []byte {
+ switch {
+ case math.IsNaN(n):
+ return append(out, `"NaN"`...)
+ case math.IsInf(n, +1):
+ return append(out, `"Infinity"`...)
+ case math.IsInf(n, -1):
+ return append(out, `"-Infinity"`...)
+ }
+
+ // JSON number formatting logic based on encoding/json.
+ // See floatEncoder.encode for reference.
+ fmt := byte('f')
+ if abs := math.Abs(n); abs != 0 {
+ if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) ||
+ bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
+ fmt = 'e'
+ }
+ }
+ out = strconv.AppendFloat(out, n, fmt, -1, bitSize)
+ if fmt == 'e' {
+ n := len(out)
+ if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' {
+ out[n-2] = out[n-1]
+ out = out[:n-1]
+ }
+ }
+ return out
+}
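For instance (in-package sketch): non-finite values encode as quoted strings per the proto3 JSON mapping, and small magnitudes switch to the trimmed exponent form:

    out := appendFloat(nil, math.NaN(), 64) // `"NaN"`
    out = appendFloat(nil, 5e-7, 64)        // `5e-7`: strconv emits "5e-07", the cleanup drops the '0'
    out = appendFloat(nil, 1e21, 64)        // `1e+21`
    _ = out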
+
+// WriteInt writes out the given signed integer in JSON number value.
+func (e *Encoder) WriteInt(n int64) {
+ e.prepareNext(scalar)
+ e.out = append(e.out, strconv.FormatInt(n, 10)...)
+}
+
+// WriteUint writes out the given unsigned integer in JSON number value.
+func (e *Encoder) WriteUint(n uint64) {
+ e.prepareNext(scalar)
+ e.out = append(e.out, strconv.FormatUint(n, 10)...)
+}
+
+// StartObject writes out the '{' symbol.
+func (e *Encoder) StartObject() {
+ e.prepareNext(objectOpen)
+ e.out = append(e.out, '{')
+}
+
+// EndObject writes out the '}' symbol.
+func (e *Encoder) EndObject() {
+ e.prepareNext(objectClose)
+ e.out = append(e.out, '}')
+}
+
+// WriteName writes out the given string as a JSON string value followed by the
+// name separator ':'. It returns an error if the input string contains invalid
+// UTF-8, which is unlikely since protobuf field names should be valid UTF-8.
+func (e *Encoder) WriteName(s string) error {
+ e.prepareNext(name)
+ var err error
+ // Append to output regardless of error.
+ e.out, err = appendString(e.out, s)
+ e.out = append(e.out, ':')
+ return err
+}
+
+// StartArray writes out the '[' symbol.
+func (e *Encoder) StartArray() {
+ e.prepareNext(arrayOpen)
+ e.out = append(e.out, '[')
+}
+
+// EndArray writes out the ']' symbol.
+func (e *Encoder) EndArray() {
+ e.prepareNext(arrayClose)
+ e.out = append(e.out, ']')
+}
+
+// prepareNext adds a comma and indentation as needed before the next value,
+// based on the last kind written and the indent option. It also updates
+// lastKind to next.
+func (e *Encoder) prepareNext(next kind) {
+ defer func() {
+ // Set lastKind to next.
+ e.lastKind = next
+ }()
+
+ if len(e.indent) == 0 {
+ // Need to add comma on the following condition.
+ if e.lastKind&(scalar|objectClose|arrayClose) != 0 &&
+ next&(name|scalar|objectOpen|arrayOpen) != 0 {
+ e.out = append(e.out, ',')
+ // For single-line output, add a random extra space after each
+ // comma to make output unstable.
+ if detrand.Bool() {
+ e.out = append(e.out, ' ')
+ }
+ }
+ return
+ }
+
+ switch {
+ case e.lastKind&(objectOpen|arrayOpen) != 0:
+ // If next type is NOT closing, add indent and newline.
+ if next&(objectClose|arrayClose) == 0 {
+ e.indents = append(e.indents, e.indent...)
+ e.out = append(e.out, '\n')
+ e.out = append(e.out, e.indents...)
+ }
+
+ case e.lastKind&(scalar|objectClose|arrayClose) != 0:
+ switch {
+ // If next type is either a value or name, add comma and newline.
+ case next&(name|scalar|objectOpen|arrayOpen) != 0:
+ e.out = append(e.out, ',', '\n')
+
+ // If next type is a closing object or array, adjust indentation.
+ case next&(objectClose|arrayClose) != 0:
+ e.indents = e.indents[:len(e.indents)-len(e.indent)]
+ e.out = append(e.out, '\n')
+ }
+ e.out = append(e.out, e.indents...)
+
+ case e.lastKind&name != 0:
+ e.out = append(e.out, ' ')
+ // For multi-line output, add a random extra space after key: to make
+ // output unstable.
+ if detrand.Bool() {
+ e.out = append(e.out, ' ')
+ }
+ }
+}
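Taken together, a hedged in-package sketch of the Encoder API above (every call shown is defined in this file; note that detrand deliberately injects an occasional extra space to keep output byte-for-byte unstable):

    e, _ := NewEncoder("") // compact output; pass e.g. "  " for indented form
    e.StartObject()
    _ = e.WriteName("id")
    e.WriteUint(7)
    _ = e.WriteName("tags")
    e.StartArray()
    _ = e.WriteString("a")
    _ = e.WriteString("b")
    e.EndArray()
    e.EndObject()
    // string(e.Bytes()) == `{"id":7,"tags":["a","b"]}` modulo detrand spacing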
diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
new file mode 100644
index 000000000..e7fcea31f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
@@ -0,0 +1,168 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/empty.proto
+
+package emptypb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+// service Foo {
+// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+// }
+//
+// The JSON representation for `Empty` is empty JSON object `{}`.
+type Empty struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *Empty) Reset() {
+ *x = Empty{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_empty_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Empty) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Empty) ProtoMessage() {}
+
+func (x *Empty) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_empty_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
+func (*Empty) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_empty_proto_rawDescGZIP(), []int{0}
+}
+
+var File_google_protobuf_empty_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_empty_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07,
+ 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a,
+ 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b,
+ 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2,
+ 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77,
+ 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_protobuf_empty_proto_rawDescOnce sync.Once
+ file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc
+)
+
+func file_google_protobuf_empty_proto_rawDescGZIP() []byte {
+ file_google_protobuf_empty_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData)
+ })
+ return file_google_protobuf_empty_proto_rawDescData
+}
+
+var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_empty_proto_goTypes = []interface{}{
+ (*Empty)(nil), // 0: google.protobuf.Empty
+}
+var file_google_protobuf_empty_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_empty_proto_init() }
+func file_google_protobuf_empty_proto_init() {
+ if File_google_protobuf_empty_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Empty); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_protobuf_empty_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_protobuf_empty_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_empty_proto_depIdxs,
+ MessageInfos: file_google_protobuf_empty_proto_msgTypes,
+ }.Build()
+ File_google_protobuf_empty_proto = out.File
+ file_google_protobuf_empty_proto_rawDesc = nil
+ file_google_protobuf_empty_proto_goTypes = nil
+ file_google_protobuf_empty_proto_depIdxs = nil
+}
diff --git a/vendor/gopkg.in/square/go-jose.v2/.travis.yml b/vendor/gopkg.in/square/go-jose.v2/.travis.yml
index ae69862df..391b99a40 100644
--- a/vendor/gopkg.in/square/go-jose.v2/.travis.yml
+++ b/vendor/gopkg.in/square/go-jose.v2/.travis.yml
@@ -8,8 +8,8 @@ matrix:
- go: tip
go:
-- '1.11.x'
-- '1.12.x'
+- '1.14.x'
+- '1.15.x'
- tip
go_import_path: gopkg.in/square/go-jose.v2
diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go
index 126b85ce2..f6465c041 100644
--- a/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go
+++ b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go
@@ -150,7 +150,7 @@ func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
return hmac.Sum(nil)[:ctx.authtagBytes]
}
-// resize ensures the the given slice has a capacity of at least n bytes.
+// resize ensures that the given slice has a capacity of at least n bytes.
// If the capacity of the slice is less than n, a new slice is allocated
// and the existing data will be copied.
func resize(in []byte, n uint64) (head, tail []byte) {
diff --git a/vendor/gopkg.in/square/go-jose.v2/crypter.go b/vendor/gopkg.in/square/go-jose.v2/crypter.go
index d24cabf6b..be7433e28 100644
--- a/vendor/gopkg.in/square/go-jose.v2/crypter.go
+++ b/vendor/gopkg.in/square/go-jose.v2/crypter.go
@@ -216,6 +216,7 @@ func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *Encrypter
if opts != nil {
encrypter.compressionAlg = opts.Compression
+ encrypter.extraHeaders = opts.ExtraHeaders
}
for _, recipient := range rcpts {
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/decode.go b/vendor/gopkg.in/square/go-jose.v2/json/decode.go
index 37457e5a8..4dbc4146c 100644
--- a/vendor/gopkg.in/square/go-jose.v2/json/decode.go
+++ b/vendor/gopkg.in/square/go-jose.v2/json/decode.go
@@ -13,6 +13,7 @@ import (
"encoding/base64"
"errors"
"fmt"
+ "math"
"reflect"
"runtime"
"strconv"
@@ -245,6 +246,18 @@ func isValidNumber(s string) bool {
return s == ""
}
+type NumberUnmarshalType int
+
+const (
+ // unmarshal a JSON number into an interface{} as a float64
+ UnmarshalFloat NumberUnmarshalType = iota
+ // unmarshal a JSON number into an interface{} as a `json.Number`
+ UnmarshalJSONNumber
+ // unmarshal a JSON number into an interface{} as an int64
+ // if the value is an integer, otherwise as a float64
+ UnmarshalIntOrFloat
+)
+
// decodeState represents the state while decoding a JSON value.
type decodeState struct {
data []byte
@@ -252,7 +265,7 @@ type decodeState struct {
scan scanner
nextscan scanner // for calls to nextValue
savedError error
- useNumber bool
+ numberType NumberUnmarshalType
}
// errPhase is used for errors that should not happen unless
@@ -723,17 +736,38 @@ func (d *decodeState) literal(v reflect.Value) {
d.literalStore(d.data[start:d.off], v, false)
}
-// convertNumber converts the number literal s to a float64 or a Number
-// depending on the setting of d.useNumber.
+// convertNumber converts the number literal s to a float64, int64 or a Number
+// depending on d.numberType.
func (d *decodeState) convertNumber(s string) (interface{}, error) {
- if d.useNumber {
+ switch d.numberType {
+
+ case UnmarshalJSONNumber:
return Number(s), nil
+ case UnmarshalIntOrFloat:
+ v, err := strconv.ParseInt(s, 10, 64)
+ if err == nil {
+ return v, nil
+ }
+
+ // try to parse an integer expressed in scientific notation
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+
+ // if it has no fractional part, use int64
+ if fi, fd := math.Modf(f); fd == 0.0 {
+ return int64(fi), nil
+ }
+ return f, nil
+ default:
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+ return f, nil
}
- f, err := strconv.ParseFloat(s, 64)
- if err != nil {
- return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
- }
- return f, nil
+
}
var numberType = reflect.TypeOf(Number(""))
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/stream.go b/vendor/gopkg.in/square/go-jose.v2/json/stream.go
index 8ddcf4d27..9b2b926b0 100644
--- a/vendor/gopkg.in/square/go-jose.v2/json/stream.go
+++ b/vendor/gopkg.in/square/go-jose.v2/json/stream.go
@@ -31,9 +31,14 @@ func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
+// Deprecated: Use SetNumberType instead.
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
-func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
+func (dec *Decoder) UseNumber() { dec.d.numberType = UnmarshalJSONNumber }
+
+// SetNumberType causes the Decoder to unmarshal a number into an interface{} as
+// a Number, float64 or int64, depending on the value of t.
+func (dec *Decoder) SetNumberType(t NumberUnmarshalType) { dec.d.numberType = t }
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
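A brief usage sketch of the new knob (data is a hypothetical byte slice; the json package here is gopkg.in/square/go-jose.v2/json):

    dec := json.NewDecoder(bytes.NewReader(data))
    dec.SetNumberType(json.UnmarshalIntOrFloat)
    var v map[string]interface{}
    if err := dec.Decode(&v); err != nil {
    	// handle error
    }
    // Integral JSON numbers (including forms like "1e3") now decode to int64;
    // everything else decodes to float64.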
diff --git a/vendor/gopkg.in/square/go-jose.v2/jwk.go b/vendor/gopkg.in/square/go-jose.v2/jwk.go
index 2dc6aec4b..222e260cb 100644
--- a/vendor/gopkg.in/square/go-jose.v2/jwk.go
+++ b/vendor/gopkg.in/square/go-jose.v2/jwk.go
@@ -238,7 +238,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
if certPub != nil && keyPub != nil {
if !reflect.DeepEqual(certPub, keyPub) {
- return errors.New("square/go-jose: invalid JWK, public keys in key and x5c fields to not match")
+ return errors.New("square/go-jose: invalid JWK, public keys in key and x5c fields do not match")
}
}
@@ -332,7 +332,7 @@ func (s *JSONWebKeySet) Key(kid string) []JSONWebKey {
const rsaThumbprintTemplate = `{"e":"%s","kty":"RSA","n":"%s"}`
const ecThumbprintTemplate = `{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`
-const edThumbprintTemplate = `{"crv":"%s","kty":"OKP",x":"%s"}`
+const edThumbprintTemplate = `{"crv":"%s","kty":"OKP","x":"%s"}`
func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) {
coordLength := curveSize(curve)
@@ -406,7 +406,7 @@ func (k *JSONWebKey) IsPublic() bool {
}
}
-// Public creates JSONWebKey with corresponding publik key if JWK represents asymmetric private key.
+// Public creates JSONWebKey with corresponding public key if JWK represents asymmetric private key.
func (k *JSONWebKey) Public() JSONWebKey {
if k.IsPublic() {
return *k
diff --git a/vendor/gopkg.in/square/go-jose.v2/opaque.go b/vendor/gopkg.in/square/go-jose.v2/opaque.go
index df747f992..fc3e8d2ef 100644
--- a/vendor/gopkg.in/square/go-jose.v2/opaque.go
+++ b/vendor/gopkg.in/square/go-jose.v2/opaque.go
@@ -17,7 +17,7 @@
package jose
// OpaqueSigner is an interface that supports signing payloads with opaque
-// private key(s). Private key operations preformed by implementors may, for
+// private key(s). Private key operations performed by implementers may, for
// example, occur in a hardware module. An OpaqueSigner may rotate signing keys
// transparently to the user of this interface.
type OpaqueSigner interface {
diff --git a/vendor/gopkg.in/square/go-jose.v2/shared.go b/vendor/gopkg.in/square/go-jose.v2/shared.go
index f8438641f..f72e5a53d 100644
--- a/vendor/gopkg.in/square/go-jose.v2/shared.go
+++ b/vendor/gopkg.in/square/go-jose.v2/shared.go
@@ -183,7 +183,7 @@ type Header struct {
// Unverified certificate chain parsed from x5c header.
certificates []*x509.Certificate
- // Any headers not recognised above get unmarshaled
+ // Any headers not recognised above get unmarshalled
// from JSON in a generic manner and placed in this map.
ExtraHeaders map[HeaderKey]interface{}
}
@@ -295,12 +295,12 @@ func (parsed rawHeader) getAPV() (*byteBuffer, error) {
return parsed.getByteBuffer(headerAPV)
}
-// getIV extracts parsed "iv" frpom the raw JSON.
+// getIV extracts parsed "iv" from the raw JSON.
func (parsed rawHeader) getIV() (*byteBuffer, error) {
return parsed.getByteBuffer(headerIV)
}
-// getTag extracts parsed "tag" frpom the raw JSON.
+// getTag extracts parsed "tag" from the raw JSON.
func (parsed rawHeader) getTag() (*byteBuffer, error) {
return parsed.getByteBuffer(headerTag)
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index bd6f81903..3da12dbd8 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -73,7 +73,7 @@ github.com/containerd/containerd/log
github.com/containerd/containerd/pkg/userns
github.com/containerd/containerd/platforms
github.com/containerd/containerd/sys
-# github.com/containerd/stargz-snapshotter/estargz v0.11.4
+# github.com/containerd/stargz-snapshotter/estargz v0.12.0
github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil
# github.com/containernetworking/cni v1.1.1
@@ -91,7 +91,7 @@ github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.1.1
## explicit
github.com/containernetworking/plugins/pkg/ns
-# github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c
+# github.com/containers/buildah v1.26.1-0.20220716095526-d31d27c357ab
## explicit
github.com/containers/buildah
github.com/containers/buildah/bind
@@ -107,13 +107,14 @@ github.com/containers/buildah/pkg/blobcache
github.com/containers/buildah/pkg/chrootuser
github.com/containers/buildah/pkg/cli
github.com/containers/buildah/pkg/completion
+github.com/containers/buildah/pkg/jail
github.com/containers/buildah/pkg/overlay
github.com/containers/buildah/pkg/parse
github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
-# github.com/containers/common v0.48.1-0.20220705175712-dd1c331887b9
+# github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a
## explicit
github.com/containers/common/libimage
github.com/containers/common/libimage/define
@@ -138,6 +139,10 @@ github.com/containers/common/pkg/config
github.com/containers/common/pkg/download
github.com/containers/common/pkg/filters
github.com/containers/common/pkg/flag
+github.com/containers/common/pkg/hooks
+github.com/containers/common/pkg/hooks/0.1.0
+github.com/containers/common/pkg/hooks/1.0.0
+github.com/containers/common/pkg/hooks/exec
github.com/containers/common/pkg/machine
github.com/containers/common/pkg/manifests
github.com/containers/common/pkg/netns
@@ -162,7 +167,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c
+# github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c
## explicit
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@@ -177,15 +182,21 @@ github.com/containers/image/v5/image
github.com/containers/image/v5/internal/blobinfocache
github.com/containers/image/v5/internal/image
github.com/containers/image/v5/internal/imagedestination
+github.com/containers/image/v5/internal/imagedestination/impl
+github.com/containers/image/v5/internal/imagedestination/stubs
github.com/containers/image/v5/internal/imagesource
+github.com/containers/image/v5/internal/imagesource/impl
+github.com/containers/image/v5/internal/imagesource/stubs
github.com/containers/image/v5/internal/iolimits
github.com/containers/image/v5/internal/manifest
github.com/containers/image/v5/internal/pkg/platform
github.com/containers/image/v5/internal/private
github.com/containers/image/v5/internal/putblobdigest
github.com/containers/image/v5/internal/rootless
+github.com/containers/image/v5/internal/signature
github.com/containers/image/v5/internal/streamdigest
github.com/containers/image/v5/internal/tmpdir
+github.com/containers/image/v5/internal/unparsedimage
github.com/containers/image/v5/internal/uploadreader
github.com/containers/image/v5/manifest
github.com/containers/image/v5/oci/archive
@@ -209,6 +220,8 @@ github.com/containers/image/v5/pkg/sysregistriesv2
github.com/containers/image/v5/pkg/tlsclientconfig
github.com/containers/image/v5/sif
github.com/containers/image/v5/signature
+github.com/containers/image/v5/signature/internal
+github.com/containers/image/v5/signature/sigstore
github.com/containers/image/v5/storage
github.com/containers/image/v5/tarball
github.com/containers/image/v5/transports
@@ -244,7 +257,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.41.1-0.20220616120034-7df64288ef35
+# github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a
## explicit
github.com/containers/storage
github.com/containers/storage/drivers
@@ -290,7 +303,7 @@ github.com/containers/storage/pkg/tarlog
github.com/containers/storage/pkg/truncindex
github.com/containers/storage/pkg/unshare
github.com/containers/storage/types
-# github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a
+# github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/coreos/go-systemd/activation
# github.com/coreos/go-systemd/v22 v22.3.2
## explicit
@@ -415,11 +428,14 @@ github.com/gogo/protobuf/protoc-gen-gogo/descriptor
# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
github.com/golang/groupcache/lru
# github.com/golang/protobuf v1.5.2
+github.com/golang/protobuf/jsonpb
github.com/golang/protobuf/proto
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/timestamp
+# github.com/google/go-containerregistry v0.10.0
+github.com/google/go-containerregistry/pkg/name
# github.com/google/go-intervals v0.0.2
github.com/google/go-intervals/intervalset
# github.com/google/gofuzz v1.2.0
@@ -455,7 +471,7 @@ github.com/jinzhu/copier
# github.com/json-iterator/go v1.1.12
## explicit
github.com/json-iterator/go
-# github.com/klauspost/compress v1.15.6
+# github.com/klauspost/compress v1.15.8
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
@@ -466,6 +482,16 @@ github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
# github.com/klauspost/pgzip v1.2.5
github.com/klauspost/pgzip
+# github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e
+github.com/letsencrypt/boulder/core
+github.com/letsencrypt/boulder/core/proto
+github.com/letsencrypt/boulder/errors
+github.com/letsencrypt/boulder/features
+github.com/letsencrypt/boulder/goodkey
+github.com/letsencrypt/boulder/identifier
+github.com/letsencrypt/boulder/probs
+github.com/letsencrypt/boulder/revocation
+github.com/letsencrypt/boulder/sa/proto
# github.com/manifoldco/promptui v0.9.0
github.com/manifoldco/promptui
github.com/manifoldco/promptui/list
@@ -587,7 +613,7 @@ github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/pkg/pwalk
github.com/opencontainers/selinux/pkg/pwalkdir
-# github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805
+# github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df
github.com/openshift/imagebuilder
github.com/openshift/imagebuilder/dockerfile/command
github.com/openshift/imagebuilder/dockerfile/parser
@@ -601,15 +627,15 @@ github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
## explicit
github.com/pmezard/go-difflib/difflib
-# github.com/proglottis/gpgme v0.1.2
+# github.com/proglottis/gpgme v0.1.3
github.com/proglottis/gpgme
-# github.com/prometheus/client_golang v1.11.1
+# github.com/prometheus/client_golang v1.12.1
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
# github.com/prometheus/client_model v0.2.0
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.30.0
+# github.com/prometheus/common v0.32.1
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
@@ -635,6 +661,11 @@ github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy
github.com/rootless-containers/rootlesskit/pkg/port/portutil
# github.com/seccomp/libseccomp-golang v0.10.0
github.com/seccomp/libseccomp-golang
+# github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1
+github.com/sigstore/sigstore/pkg/cryptoutils
+github.com/sigstore/sigstore/pkg/signature
+github.com/sigstore/sigstore/pkg/signature/options
+github.com/sigstore/sigstore/pkg/signature/payload
# github.com/sirupsen/logrus v1.8.1
## explicit
github.com/sirupsen/logrus
@@ -658,6 +689,10 @@ github.com/sylabs/sif/v2/pkg/sif
github.com/syndtr/gocapability/capability
# github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tchap/go-patricia/patricia
+# github.com/theupdateframework/go-tuf v0.3.0
+github.com/theupdateframework/go-tuf/encrypted
+# github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399
+github.com/titanous/rocacheck
# github.com/uber/jaeger-client-go v2.30.0+incompatible
## explicit
github.com/uber/jaeger-client-go/log
@@ -715,6 +750,8 @@ golang.org/x/crypto/curve25519/internal/field
golang.org/x/crypto/ed25519
golang.org/x/crypto/internal/poly1305
golang.org/x/crypto/internal/subtle
+golang.org/x/crypto/nacl/secretbox
+golang.org/x/crypto/ocsp
golang.org/x/crypto/openpgp
golang.org/x/crypto/openpgp/armor
golang.org/x/crypto/openpgp/elgamal
@@ -722,11 +759,14 @@ golang.org/x/crypto/openpgp/errors
golang.org/x/crypto/openpgp/packet
golang.org/x/crypto/openpgp/s2k
golang.org/x/crypto/pbkdf2
+golang.org/x/crypto/salsa20/salsa
+golang.org/x/crypto/scrypt
+golang.org/x/crypto/sha3
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/net v0.0.0-20220225172249-27dd8689420f
+# golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e
golang.org/x/net/context
golang.org/x/net/html
golang.org/x/net/html/atom
@@ -737,11 +777,11 @@ golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/internal/timeseries
golang.org/x/net/trace
-# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+# golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
## explicit
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
+# golang.org/x/sys v0.0.0-20220624220833-87e55d714810
## explicit
golang.org/x/sys/cpu
golang.org/x/sys/execabs
@@ -778,9 +818,9 @@ golang.org/x/text/unicode/norm
# golang.org/x/tools v0.1.10
golang.org/x/tools/go/ast/inspector
golang.org/x/tools/internal/typeparams
-# google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8
+# google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.44.0
+# google.golang.org/grpc v1.47.0
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
@@ -789,6 +829,7 @@ google.golang.org/grpc/balancer/base
google.golang.org/grpc/balancer/grpclb/state
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
+google.golang.org/grpc/channelz
google.golang.org/grpc/codes
google.golang.org/grpc/connectivity
google.golang.org/grpc/credentials
@@ -798,6 +839,7 @@ google.golang.org/grpc/encoding/proto
google.golang.org/grpc/grpclog
google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff
+google.golang.org/grpc/internal/balancer/gracefulswitch
google.golang.org/grpc/internal/balancerload
google.golang.org/grpc/internal/binarylog
google.golang.org/grpc/internal/buffer
@@ -809,6 +851,7 @@ google.golang.org/grpc/internal/grpcrand
google.golang.org/grpc/internal/grpcsync
google.golang.org/grpc/internal/grpcutil
google.golang.org/grpc/internal/metadata
+google.golang.org/grpc/internal/pretty
google.golang.org/grpc/internal/resolver
google.golang.org/grpc/internal/resolver/dns
google.golang.org/grpc/internal/resolver/passthrough
@@ -828,12 +871,14 @@ google.golang.org/grpc/status
google.golang.org/grpc/tap
# google.golang.org/protobuf v1.28.0
## explicit
+google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire
google.golang.org/protobuf/internal/descfmt
google.golang.org/protobuf/internal/descopts
google.golang.org/protobuf/internal/detrand
google.golang.org/protobuf/internal/encoding/defval
+google.golang.org/protobuf/internal/encoding/json
google.golang.org/protobuf/internal/encoding/messageset
google.golang.org/protobuf/internal/encoding/tag
google.golang.org/protobuf/internal/encoding/text
@@ -857,11 +902,12 @@ google.golang.org/protobuf/runtime/protoimpl
google.golang.org/protobuf/types/descriptorpb
google.golang.org/protobuf/types/known/anypb
google.golang.org/protobuf/types/known/durationpb
+google.golang.org/protobuf/types/known/emptypb
google.golang.org/protobuf/types/known/timestamppb
# gopkg.in/inf.v0 v0.9.1
## explicit
gopkg.in/inf.v0
-# gopkg.in/square/go-jose.v2 v2.5.1
+# gopkg.in/square/go-jose.v2 v2.6.0
gopkg.in/square/go-jose.v2
gopkg.in/square/go-jose.v2/cipher
gopkg.in/square/go-jose.v2/json