From a4a70b4506ec4abb8b3bbc3873ee5ca015a8ed08 Mon Sep 17 00:00:00 2001
From: Nalin Dahyabhai
Date: Thu, 24 Oct 2019 10:37:22 -0400
Subject: bump containers/image to v5.0.0, buildah to v1.11.4

Move to containers/image v5 and containers/buildah v1.11.4. Replace an
equality check with a type assertion when checking for a
docker.ErrUnauthorizedForCredentials in `podman login`.

Signed-off-by: Nalin Dahyabhai
---
 vendor/github.com/containerd/containerd/LICENSE | 191 +++
 vendor/github.com/containerd/containerd/NOTICE | 16 +
 .../containerd/containerd/errdefs/errors.go | 93 ++
 .../containerd/containerd/errdefs/grpc.go | 147 +++
 vendor/github.com/containerd/continuity/fs/copy.go | 172 +++
 .../containerd/continuity/fs/copy_linux.go | 144 +++
 .../containerd/continuity/fs/copy_unix.go | 112 ++
 .../containerd/continuity/fs/copy_windows.go | 49 +
 vendor/github.com/containerd/continuity/fs/diff.go | 326 +++++
 .../containerd/continuity/fs/diff_unix.go | 74 ++
 .../containerd/continuity/fs/diff_windows.go | 48 +
 .../containerd/continuity/fs/dtype_linux.go | 103 ++
 vendor/github.com/containerd/continuity/fs/du.go | 38 +
 .../github.com/containerd/continuity/fs/du_unix.go | 110 ++
 .../containerd/continuity/fs/du_windows.go | 82 ++
 .../containerd/continuity/fs/hardlink.go | 43 +
 .../containerd/continuity/fs/hardlink_unix.go | 34 +
 .../containerd/continuity/fs/hardlink_windows.go | 23 +
 vendor/github.com/containerd/continuity/fs/path.go | 313 +++++
 .../containerd/continuity/fs/stat_bsd.go | 44 +
 .../containerd/continuity/fs/stat_linux.go | 43 +
 vendor/github.com/containerd/continuity/fs/time.go | 29 +
 .../continuity/pathdriver/path_driver.go | 101 --
 .../containerd/continuity/syscallx/syscall_unix.go | 26 +
 .../continuity/syscallx/syscall_windows.go | 112 ++
 .../containerd/continuity/sysx/README.md | 3 +
 .../containerd/continuity/sysx/file_posix.go | 128 ++
 .../containerd/continuity/sysx/generate.sh | 52 +
 .../containerd/continuity/sysx/nodata_linux.go | 23 +
 .../containerd/continuity/sysx/nodata_solaris.go | 24 +
 .../containerd/continuity/sysx/nodata_unix.go | 25 +
 .../github.com/containerd/continuity/sysx/xattr.go | 125 ++
 .../continuity/sysx/xattr_unsupported.go | 67 +
 vendor/github.com/containers/buildah/CHANGELOG.md | 26 +
 vendor/github.com/containers/buildah/OWNERS | 22 +
 vendor/github.com/containers/buildah/buildah.go | 4 +-
 vendor/github.com/containers/buildah/changelog.txt | 22 +
 vendor/github.com/containers/buildah/commit.go | 20 +-
 vendor/github.com/containers/buildah/common.go | 4 +-
 vendor/github.com/containers/buildah/config.go | 6 +-
 .../github.com/containers/buildah/docker/types.go | 2 +-
 vendor/github.com/containers/buildah/go.mod | 10 +-
 vendor/github.com/containers/buildah/go.sum | 29 +-
 vendor/github.com/containers/buildah/image.go | 12 +-
 .../containers/buildah/imagebuildah/build.go | 4 +-
 .../containers/buildah/imagebuildah/executor.go | 10 +-
 .../buildah/imagebuildah/stage_executor.go | 23 +-
 .../containers/buildah/imagebuildah/util.go | 11 +-
 vendor/github.com/containers/buildah/import.go | 6 +-
 vendor/github.com/containers/buildah/new.go | 12 +-
 .../containers/buildah/pkg/blobcache/blobcache.go | 28 +-
 .../containers/buildah/pkg/cli/common.go | 13 +-
 .../containers/buildah/pkg/parse/parse.go | 8 +-
 .../containers/buildah/pkg/parse/parse_unix.go | 38 +-
 vendor/github.com/containers/buildah/pull.go | 24 +-
 vendor/github.com/containers/buildah/util.go | 6 +-
 vendor/github.com/containers/buildah/util/util.go | 12 +-
 vendor/github.com/containers/image/v4/LICENSE | 189 ---
vendor/github.com/containers/image/v4/copy/copy.go | 975 --------------- .../containers/image/v4/copy/manifest.go | 121 -- .../containers/image/v4/copy/progress_reader.go | 28 - vendor/github.com/containers/image/v4/copy/sign.go | 31 - .../image/v4/directory/directory_dest.go | 260 ---- .../containers/image/v4/directory/directory_src.go | 96 -- .../image/v4/directory/directory_transport.go | 187 --- .../image/v4/directory/explicitfilepath/path.go | 56 - .../containers/image/v4/docker/archive/dest.go | 72 -- .../containers/image/v4/docker/archive/src.go | 40 - .../image/v4/docker/archive/transport.go | 160 --- .../github.com/containers/image/v4/docker/cache.go | 23 - .../containers/image/v4/docker/daemon/client.go | 85 -- .../image/v4/docker/daemon/daemon_dest.go | 144 --- .../image/v4/docker/daemon/daemon_src.go | 62 - .../image/v4/docker/daemon/daemon_transport.go | 223 ---- .../containers/image/v4/docker/docker_client.go | 645 ---------- .../containers/image/v4/docker/docker_image.go | 107 -- .../image/v4/docker/docker_image_dest.go | 611 --------- .../containers/image/v4/docker/docker_image_src.go | 451 ------- .../containers/image/v4/docker/docker_transport.go | 168 --- .../containers/image/v4/docker/lookaside.go | 202 --- .../image/v4/docker/policyconfiguration/naming.go | 56 - .../containers/image/v4/docker/reference/README.md | 2 - .../image/v4/docker/reference/helpers.go | 42 - .../image/v4/docker/reference/normalize.go | 181 --- .../image/v4/docker/reference/reference.go | 433 ------- .../containers/image/v4/docker/reference/regexp.go | 143 --- .../containers/image/v4/docker/tarfile/dest.go | 407 ------ .../containers/image/v4/docker/tarfile/doc.go | 3 - .../containers/image/v4/docker/tarfile/src.go | 478 -------- .../containers/image/v4/docker/tarfile/types.go | 28 - .../containers/image/v4/docker/wwwauthenticate.go | 159 --- .../containers/image/v4/image/docker_list.go | 94 -- .../containers/image/v4/image/docker_schema1.go | 202 --- .../containers/image/v4/image/docker_schema2.go | 357 ------ .../containers/image/v4/image/manifest.go | 73 -- .../github.com/containers/image/v4/image/memory.go | 65 - vendor/github.com/containers/image/v4/image/oci.go | 214 ---- .../containers/image/v4/image/sourced.go | 104 -- .../containers/image/v4/image/unparsed.go | 95 -- .../containers/image/v4/internal/pkg/keyctl/key.go | 73 -- .../image/v4/internal/pkg/keyctl/keyring.go | 120 -- .../image/v4/internal/pkg/keyctl/perm.go | 33 - .../image/v4/internal/pkg/keyctl/sys_linux.go | 25 - .../containers/image/v4/internal/tmpdir/tmpdir.go | 29 - .../containers/image/v4/manifest/docker_schema1.go | 316 ----- .../containers/image/v4/manifest/docker_schema2.go | 349 ------ .../containers/image/v4/manifest/manifest.go | 257 ---- .../github.com/containers/image/v4/manifest/oci.go | 243 ---- .../containers/image/v4/oci/archive/oci_dest.go | 151 --- .../containers/image/v4/oci/archive/oci_src.go | 102 -- .../image/v4/oci/archive/oci_transport.go | 192 --- .../containers/image/v4/oci/internal/oci_util.go | 126 -- .../containers/image/v4/oci/layout/oci_dest.go | 306 ----- .../containers/image/v4/oci/layout/oci_src.go | 171 --- .../image/v4/oci/layout/oci_transport.go | 264 ---- .../image/v4/openshift/openshift-copies.go | 1170 ------------------ .../containers/image/v4/openshift/openshift.go | 562 --------- .../image/v4/openshift/openshift_transport.go | 157 --- .../containers/image/v4/ostree/ostree_dest.go | 504 -------- .../containers/image/v4/ostree/ostree_src.go | 416 ------- 
.../containers/image/v4/ostree/ostree_transport.go | 252 ---- .../image/v4/pkg/blobinfocache/boltdb/boltdb.go | 332 ----- .../image/v4/pkg/blobinfocache/default.go | 75 -- .../internal/prioritize/prioritize.go | 110 -- .../image/v4/pkg/blobinfocache/memory/memory.go | 145 --- .../image/v4/pkg/blobinfocache/none/none.go | 49 - .../image/v4/pkg/compression/compression.go | 149 --- .../image/v4/pkg/compression/internal/types.go | 57 - .../image/v4/pkg/compression/types/types.go | 13 - .../containers/image/v4/pkg/compression/zstd.go | 59 - .../image/v4/pkg/docker/config/config.go | 352 ------ .../image/v4/pkg/docker/config/config_linux.go | 115 -- .../v4/pkg/docker/config/config_unsupported.go | 20 - .../containers/image/v4/pkg/strslice/README.md | 1 - .../containers/image/v4/pkg/strslice/strslice.go | 30 - .../v4/pkg/sysregistriesv2/system_registries_v2.go | 483 -------- .../v4/pkg/tlsclientconfig/tlsclientconfig.go | 112 -- .../containers/image/v4/signature/docker.go | 65 - .../containers/image/v4/signature/json.go | 88 -- .../containers/image/v4/signature/mechanism.go | 85 -- .../image/v4/signature/mechanism_gpgme.go | 175 --- .../image/v4/signature/mechanism_openpgp.go | 159 --- .../containers/image/v4/signature/policy_config.go | 688 ----------- .../containers/image/v4/signature/policy_eval.go | 289 ----- .../image/v4/signature/policy_eval_baselayer.go | 20 - .../image/v4/signature/policy_eval_signedby.go | 131 -- .../image/v4/signature/policy_eval_simple.go | 29 - .../image/v4/signature/policy_reference_match.go | 101 -- .../containers/image/v4/signature/policy_types.go | 152 --- .../containers/image/v4/signature/signature.go | 280 ----- .../containers/image/v4/storage/storage_image.go | 956 --------------- .../image/v4/storage/storage_reference.go | 225 ---- .../image/v4/storage/storage_transport.go | 366 ------ .../github.com/containers/image/v4/tarball/doc.go | 48 - .../image/v4/tarball/tarball_reference.go | 94 -- .../containers/image/v4/tarball/tarball_src.go | 268 ---- .../image/v4/tarball/tarball_transport.go | 66 - .../v4/transports/alltransports/alltransports.go | 46 - .../v4/transports/alltransports/docker_daemon.go | 8 - .../transports/alltransports/docker_daemon_stub.go | 9 - .../image/v4/transports/alltransports/ostree.go | 8 - .../v4/transports/alltransports/ostree_stub.go | 9 - .../image/v4/transports/alltransports/storage.go | 8 - .../v4/transports/alltransports/storage_stub.go | 9 - .../containers/image/v4/transports/stub.go | 36 - .../containers/image/v4/transports/transports.go | 90 -- .../github.com/containers/image/v4/types/types.go | 535 -------- .../containers/image/v4/version/version.go | 18 - vendor/github.com/containers/image/v5/LICENSE | 189 +++ vendor/github.com/containers/image/v5/copy/copy.go | 1233 +++++++++++++++++++ .../containers/image/v5/copy/manifest.go | 154 +++ .../containers/image/v5/copy/progress_reader.go | 28 + vendor/github.com/containers/image/v5/copy/sign.go | 31 + .../image/v5/directory/directory_dest.go | 267 ++++ .../containers/image/v5/directory/directory_src.go | 96 ++ .../image/v5/directory/directory_transport.go | 193 +++ .../image/v5/directory/explicitfilepath/path.go | 56 + .../containers/image/v5/docker/archive/dest.go | 72 ++ .../containers/image/v5/docker/archive/src.go | 35 + .../image/v5/docker/archive/transport.go | 160 +++ .../github.com/containers/image/v5/docker/cache.go | 23 + .../containers/image/v5/docker/daemon/client.go | 85 ++ .../image/v5/docker/daemon/daemon_dest.go | 144 +++ .../image/v5/docker/daemon/daemon_src.go | 
57 + .../image/v5/docker/daemon/daemon_transport.go | 223 ++++ .../containers/image/v5/docker/docker_client.go | 703 +++++++++++ .../containers/image/v5/docker/docker_image.go | 105 ++ .../image/v5/docker/docker_image_dest.go | 641 ++++++++++ .../containers/image/v5/docker/docker_image_src.go | 457 +++++++ .../containers/image/v5/docker/docker_transport.go | 168 +++ .../containers/image/v5/docker/errors.go | 43 + .../containers/image/v5/docker/lookaside.go | 202 +++ .../image/v5/docker/policyconfiguration/naming.go | 56 + .../containers/image/v5/docker/reference/README.md | 2 + .../image/v5/docker/reference/helpers.go | 42 + .../image/v5/docker/reference/normalize.go | 181 +++ .../image/v5/docker/reference/reference.go | 433 +++++++ .../containers/image/v5/docker/reference/regexp.go | 143 +++ .../containers/image/v5/docker/tarfile/dest.go | 415 +++++++ .../containers/image/v5/docker/tarfile/doc.go | 3 + .../containers/image/v5/docker/tarfile/src.go | 490 ++++++++ .../containers/image/v5/docker/tarfile/types.go | 28 + .../containers/image/v5/docker/wwwauthenticate.go | 159 +++ .../containers/image/v5/image/docker_list.go | 34 + .../containers/image/v5/image/docker_schema1.go | 202 +++ .../containers/image/v5/image/docker_schema2.go | 357 ++++++ .../containers/image/v5/image/manifest.go | 75 ++ .../github.com/containers/image/v5/image/memory.go | 64 + vendor/github.com/containers/image/v5/image/oci.go | 214 ++++ .../containers/image/v5/image/oci_index.go | 34 + .../containers/image/v5/image/sourced.go | 104 ++ .../containers/image/v5/image/unparsed.go | 95 ++ .../containers/image/v5/internal/pkg/keyctl/key.go | 73 ++ .../image/v5/internal/pkg/keyctl/keyring.go | 120 ++ .../image/v5/internal/pkg/keyctl/perm.go | 33 + .../image/v5/internal/pkg/keyctl/sys_linux.go | 25 + .../containers/image/v5/internal/tmpdir/tmpdir.go | 29 + .../containers/image/v5/manifest/docker_schema1.go | 316 +++++ .../containers/image/v5/manifest/docker_schema2.go | 349 ++++++ .../image/v5/manifest/docker_schema2_list.go | 216 ++++ .../containers/image/v5/manifest/list.go | 106 ++ .../containers/image/v5/manifest/manifest.go | 263 ++++ .../github.com/containers/image/v5/manifest/oci.go | 243 ++++ .../containers/image/v5/manifest/oci_index.go | 221 ++++ .../containers/image/v5/oci/archive/oci_dest.go | 159 +++ .../containers/image/v5/oci/archive/oci_src.go | 109 ++ .../image/v5/oci/archive/oci_transport.go | 192 +++ .../containers/image/v5/oci/internal/oci_util.go | 126 ++ .../containers/image/v5/oci/layout/oci_dest.go | 342 ++++++ .../containers/image/v5/oci/layout/oci_src.go | 189 +++ .../image/v5/oci/layout/oci_transport.go | 264 ++++ .../image/v5/openshift/openshift-copies.go | 1170 ++++++++++++++++++ .../containers/image/v5/openshift/openshift.go | 577 +++++++++ .../image/v5/openshift/openshift_transport.go | 157 +++ .../containers/image/v5/ostree/ostree_dest.go | 517 ++++++++ .../containers/image/v5/ostree/ostree_src.go | 430 +++++++ .../containers/image/v5/ostree/ostree_transport.go | 252 ++++ .../image/v5/pkg/blobinfocache/boltdb/boltdb.go | 332 +++++ .../image/v5/pkg/blobinfocache/default.go | 75 ++ .../internal/prioritize/prioritize.go | 110 ++ .../image/v5/pkg/blobinfocache/memory/memory.go | 145 +++ .../image/v5/pkg/blobinfocache/none/none.go | 49 + .../image/v5/pkg/compression/compression.go | 149 +++ .../image/v5/pkg/compression/internal/types.go | 57 + .../image/v5/pkg/compression/types/types.go | 13 + .../containers/image/v5/pkg/compression/zstd.go | 59 + .../image/v5/pkg/docker/config/config.go | 363 
++++++ .../image/v5/pkg/docker/config/config_linux.go | 115 ++ .../v5/pkg/docker/config/config_unsupported.go | 20 + .../containers/image/v5/pkg/strslice/README.md | 1 + .../containers/image/v5/pkg/strslice/strslice.go | 30 + .../v5/pkg/sysregistriesv2/system_registries_v2.go | 482 ++++++++ .../v5/pkg/tlsclientconfig/tlsclientconfig.go | 112 ++ .../containers/image/v5/signature/docker.go | 65 + .../containers/image/v5/signature/json.go | 88 ++ .../containers/image/v5/signature/mechanism.go | 85 ++ .../image/v5/signature/mechanism_gpgme.go | 175 +++ .../image/v5/signature/mechanism_openpgp.go | 159 +++ .../containers/image/v5/signature/policy_config.go | 688 +++++++++++ .../containers/image/v5/signature/policy_eval.go | 289 +++++ .../image/v5/signature/policy_eval_baselayer.go | 20 + .../image/v5/signature/policy_eval_signedby.go | 130 ++ .../image/v5/signature/policy_eval_simple.go | 29 + .../image/v5/signature/policy_reference_match.go | 101 ++ .../containers/image/v5/signature/policy_types.go | 152 +++ .../containers/image/v5/signature/signature.go | 279 +++++ .../containers/image/v5/storage/storage_image.go | 1039 ++++++++++++++++ .../image/v5/storage/storage_reference.go | 299 +++++ .../image/v5/storage/storage_transport.go | 365 ++++++ .../github.com/containers/image/v5/tarball/doc.go | 47 + .../image/v5/tarball/tarball_reference.go | 94 ++ .../containers/image/v5/tarball/tarball_src.go | 274 +++++ .../image/v5/tarball/tarball_transport.go | 66 + .../v5/transports/alltransports/alltransports.go | 46 + .../v5/transports/alltransports/docker_daemon.go | 8 + .../transports/alltransports/docker_daemon_stub.go | 9 + .../image/v5/transports/alltransports/ostree.go | 8 + .../v5/transports/alltransports/ostree_stub.go | 9 + .../image/v5/transports/alltransports/storage.go | 8 + .../v5/transports/alltransports/storage_stub.go | 9 + .../containers/image/v5/transports/stub.go | 36 + .../containers/image/v5/transports/transports.go | 90 ++ .../github.com/containers/image/v5/types/types.go | 555 +++++++++ .../containers/image/v5/version/version.go | 18 + vendor/github.com/docker/docker/NOTICE | 2 +- vendor/github.com/docker/docker/api/swagger.yaml | 39 +- .../docker/api/types/container/host_config.go | 2 +- .../docker/docker/api/types/filters/parse.go | 2 +- .../docker/docker/api/types/registry/registry.go | 2 +- vendor/github.com/docker/docker/api/types/types.go | 1 + vendor/github.com/docker/docker/client/client.go | 3 +- .../docker/docker/client/container_list.go | 1 + vendor/github.com/docker/docker/client/events.go | 1 + vendor/github.com/docker/docker/client/hijack.go | 2 + .../github.com/docker/docker/client/image_list.go | 1 + .../docker/docker/client/network_list.go | 1 + .../github.com/docker/docker/client/plugin_list.go | 1 + vendor/github.com/docker/docker/client/request.go | 17 +- .../docker/docker/client/service_create.go | 2 +- .../github.com/docker/docker/client/volume_list.go | 1 + .../docker/docker/errdefs/http_helpers.go | 33 +- .../github.com/docker/docker/pkg/archive/README.md | 1 + .../docker/docker/pkg/archive/archive.go | 1294 ++++++++++++++++++++ .../docker/docker/pkg/archive/archive_linux.go | 261 ++++ .../docker/docker/pkg/archive/archive_other.go | 7 + .../docker/docker/pkg/archive/archive_unix.go | 115 ++ .../docker/docker/pkg/archive/archive_windows.go | 67 + .../docker/docker/pkg/archive/changes.go | 445 +++++++ .../docker/docker/pkg/archive/changes_linux.go | 286 +++++ .../docker/docker/pkg/archive/changes_other.go | 97 ++ 
.../docker/docker/pkg/archive/changes_unix.go | 43 + .../docker/docker/pkg/archive/changes_windows.go | 34 + .../github.com/docker/docker/pkg/archive/copy.go | 480 ++++++++ .../docker/docker/pkg/archive/copy_unix.go | 11 + .../docker/docker/pkg/archive/copy_windows.go | 9 + .../github.com/docker/docker/pkg/archive/diff.go | 260 ++++ .../docker/docker/pkg/archive/example_changes.go | 97 ++ .../docker/docker/pkg/archive/time_linux.go | 16 + .../docker/docker/pkg/archive/time_unsupported.go | 16 + .../docker/docker/pkg/archive/whiteouts.go | 23 + .../github.com/docker/docker/pkg/archive/wrap.go | 59 + .../docker/docker/pkg/homedir/homedir_linux.go | 16 - .../docker/docker/pkg/homedir/homedir_others.go | 6 - .../docker/docker/pkg/homedir/homedir_unix.go | 9 +- .../docker/docker/pkg/idtools/idtools_unix.go | 2 +- .../docker/docker/pkg/idtools/idtools_windows.go | 2 +- .../docker/docker/pkg/jsonmessage/jsonmessage.go | 283 +++++ .../docker/pkg/namesgenerator/names-generator.go | 6 - .../docker/pkg/parsers/kernel/kernel_darwin.go | 2 +- .../docker/pkg/parsers/kernel/kernel_windows.go | 4 +- .../docker/pkg/parsers/kernel/uname_solaris.go | 14 - vendor/github.com/docker/docker/pkg/pools/pools.go | 1 + .../github.com/docker/docker/pkg/system/filesys.go | 67 - .../docker/docker/pkg/system/filesys_unix.go | 67 + .../docker/docker/pkg/system/filesys_windows.go | 7 +- .../docker/docker/pkg/system/meminfo_linux.go | 8 +- vendor/github.com/docker/docker/pkg/system/path.go | 10 +- .../docker/docker/pkg/system/stat_linux.go | 3 +- .../docker/docker/pkg/system/stat_solaris.go | 13 - .../docker/docker/pkg/system/syscall_windows.go | 19 +- .../docker/docker/pkg/system/utimes_freebsd.go | 24 - .../docker/docker/pkg/system/utimes_linux.go | 25 - .../docker/docker/pkg/system/utimes_unix.go | 24 + .../docker/docker/pkg/term/term_windows.go | 2 +- .../docker/docker/pkg/term/windows/windows.go | 3 +- .../docker/docker/profiles/seccomp/default.json | 4 + .../docker/docker/profiles/seccomp/seccomp.go | 12 +- .../docker/profiles/seccomp/seccomp_default.go | 4 + .../fsouza/go-dockerclient/.gitattributes | 1 + .../github.com/fsouza/go-dockerclient/.gitignore | 2 - .../fsouza/go-dockerclient/.golangci.yaml | 29 + .../github.com/fsouza/go-dockerclient/.travis.yml | 4 +- vendor/github.com/fsouza/go-dockerclient/AUTHORS | 1 + vendor/github.com/fsouza/go-dockerclient/Makefile | 19 +- vendor/github.com/fsouza/go-dockerclient/README.md | 2 +- .../github.com/fsouza/go-dockerclient/appveyor.yml | 10 +- vendor/github.com/fsouza/go-dockerclient/auth.go | 3 +- vendor/github.com/fsouza/go-dockerclient/client.go | 50 +- .../fsouza/go-dockerclient/client_windows.go | 3 +- .../github.com/fsouza/go-dockerclient/container.go | 69 +- .../fsouza/go-dockerclient/distribution.go | 3 +- vendor/github.com/fsouza/go-dockerclient/event.go | 10 +- vendor/github.com/fsouza/go-dockerclient/exec.go | 11 +- vendor/github.com/fsouza/go-dockerclient/go.mod | 12 +- vendor/github.com/fsouza/go-dockerclient/go.sum | 17 +- vendor/github.com/fsouza/go-dockerclient/image.go | 107 +- .../go-dockerclient/internal/archive/archive.go | 509 -------- .../internal/archive/archive_linux.go | 106 -- .../internal/archive/archive_other.go | 11 - .../internal/archive/archive_unix.go | 77 -- .../internal/archive/archive_windows.go | 71 -- .../internal/archive/changes_unix.go | 16 - .../internal/archive/changes_windows.go | 11 - .../go-dockerclient/internal/archive/copy.go | 29 - .../go-dockerclient/internal/archive/whiteouts.go | 27 - 
.../internal/jsonmessage/jsonmessage.go | 402 ------ .../fsouza/go-dockerclient/internal/term/term.go | 11 - .../go-dockerclient/internal/term/winsize.go | 16 - .../internal/term/winsize_windows.go | 22 - vendor/github.com/fsouza/go-dockerclient/misc.go | 6 +- .../github.com/fsouza/go-dockerclient/network.go | 16 +- vendor/github.com/fsouza/go-dockerclient/plugin.go | 93 +- vendor/github.com/fsouza/go-dockerclient/swarm.go | 10 +- .../fsouza/go-dockerclient/swarm_configs.go | 10 +- .../fsouza/go-dockerclient/swarm_node.go | 8 +- .../fsouza/go-dockerclient/swarm_secrets.go | 10 +- .../fsouza/go-dockerclient/swarm_service.go | 12 +- .../fsouza/go-dockerclient/swarm_task.go | 4 +- vendor/github.com/fsouza/go-dockerclient/system.go | 3 +- vendor/github.com/fsouza/go-dockerclient/tar.go | 2 +- vendor/github.com/fsouza/go-dockerclient/tls.go | 2 +- vendor/github.com/fsouza/go-dockerclient/volume.go | 10 +- vendor/github.com/ijc/Gotty/LICENSE | 26 - vendor/github.com/ijc/Gotty/README | 5 - vendor/github.com/ijc/Gotty/TODO | 3 - vendor/github.com/ijc/Gotty/attributes.go | 514 -------- vendor/github.com/ijc/Gotty/gotty.go | 244 ---- vendor/github.com/ijc/Gotty/parser.go | 362 ------ vendor/github.com/ijc/Gotty/types.go | 23 - vendor/github.com/morikuni/aec/LICENSE | 21 + vendor/github.com/morikuni/aec/README.md | 178 +++ vendor/github.com/morikuni/aec/aec.go | 137 +++ vendor/github.com/morikuni/aec/ansi.go | 59 + vendor/github.com/morikuni/aec/builder.go | 388 ++++++ vendor/github.com/morikuni/aec/sample.gif | Bin 0 -> 12548 bytes vendor/github.com/morikuni/aec/sgr.go | 202 +++ vendor/github.com/openshift/imagebuilder/OWNERS | 2 - vendor/github.com/openshift/imagebuilder/README.md | 5 + .../github.com/openshift/imagebuilder/builder.go | 3 +- .../github.com/openshift/imagebuilder/constants.go | 4 - .../github.com/openshift/imagebuilder/evaluator.go | 20 +- .../github.com/openshift/imagebuilder/vendor.conf | 2 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 66 + vendor/modules.txt | 104 +- 414 files changed, 30947 insertions(+), 24288 deletions(-) create mode 100644 vendor/github.com/containerd/containerd/LICENSE create mode 100644 vendor/github.com/containerd/containerd/NOTICE create mode 100644 vendor/github.com/containerd/containerd/errdefs/errors.go create mode 100644 vendor/github.com/containerd/containerd/errdefs/grpc.go create mode 100644 vendor/github.com/containerd/continuity/fs/copy.go create mode 100644 vendor/github.com/containerd/continuity/fs/copy_linux.go create mode 100644 vendor/github.com/containerd/continuity/fs/copy_unix.go create mode 100644 vendor/github.com/containerd/continuity/fs/copy_windows.go create mode 100644 vendor/github.com/containerd/continuity/fs/diff.go create mode 100644 vendor/github.com/containerd/continuity/fs/diff_unix.go create mode 100644 vendor/github.com/containerd/continuity/fs/diff_windows.go create mode 100644 vendor/github.com/containerd/continuity/fs/dtype_linux.go create mode 100644 vendor/github.com/containerd/continuity/fs/du.go create mode 100644 vendor/github.com/containerd/continuity/fs/du_unix.go create mode 100644 vendor/github.com/containerd/continuity/fs/du_windows.go create mode 100644 vendor/github.com/containerd/continuity/fs/hardlink.go create mode 100644 vendor/github.com/containerd/continuity/fs/hardlink_unix.go create mode 100644 vendor/github.com/containerd/continuity/fs/hardlink_windows.go create mode 100644 vendor/github.com/containerd/continuity/fs/path.go create mode 100644 
vendor/github.com/containerd/continuity/fs/stat_bsd.go create mode 100644 vendor/github.com/containerd/continuity/fs/stat_linux.go create mode 100644 vendor/github.com/containerd/continuity/fs/time.go delete mode 100644 vendor/github.com/containerd/continuity/pathdriver/path_driver.go create mode 100644 vendor/github.com/containerd/continuity/syscallx/syscall_unix.go create mode 100644 vendor/github.com/containerd/continuity/syscallx/syscall_windows.go create mode 100644 vendor/github.com/containerd/continuity/sysx/README.md create mode 100644 vendor/github.com/containerd/continuity/sysx/file_posix.go create mode 100644 vendor/github.com/containerd/continuity/sysx/generate.sh create mode 100644 vendor/github.com/containerd/continuity/sysx/nodata_linux.go create mode 100644 vendor/github.com/containerd/continuity/sysx/nodata_solaris.go create mode 100644 vendor/github.com/containerd/continuity/sysx/nodata_unix.go create mode 100644 vendor/github.com/containerd/continuity/sysx/xattr.go create mode 100644 vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go create mode 100644 vendor/github.com/containers/buildah/OWNERS delete mode 100644 vendor/github.com/containers/image/v4/LICENSE delete mode 100644 vendor/github.com/containers/image/v4/copy/copy.go delete mode 100644 vendor/github.com/containers/image/v4/copy/manifest.go delete mode 100644 vendor/github.com/containers/image/v4/copy/progress_reader.go delete mode 100644 vendor/github.com/containers/image/v4/copy/sign.go delete mode 100644 vendor/github.com/containers/image/v4/directory/directory_dest.go delete mode 100644 vendor/github.com/containers/image/v4/directory/directory_src.go delete mode 100644 vendor/github.com/containers/image/v4/directory/directory_transport.go delete mode 100644 vendor/github.com/containers/image/v4/directory/explicitfilepath/path.go delete mode 100644 vendor/github.com/containers/image/v4/docker/archive/dest.go delete mode 100644 vendor/github.com/containers/image/v4/docker/archive/src.go delete mode 100644 vendor/github.com/containers/image/v4/docker/archive/transport.go delete mode 100644 vendor/github.com/containers/image/v4/docker/cache.go delete mode 100644 vendor/github.com/containers/image/v4/docker/daemon/client.go delete mode 100644 vendor/github.com/containers/image/v4/docker/daemon/daemon_dest.go delete mode 100644 vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go delete mode 100644 vendor/github.com/containers/image/v4/docker/daemon/daemon_transport.go delete mode 100644 vendor/github.com/containers/image/v4/docker/docker_client.go delete mode 100644 vendor/github.com/containers/image/v4/docker/docker_image.go delete mode 100644 vendor/github.com/containers/image/v4/docker/docker_image_dest.go delete mode 100644 vendor/github.com/containers/image/v4/docker/docker_image_src.go delete mode 100644 vendor/github.com/containers/image/v4/docker/docker_transport.go delete mode 100644 vendor/github.com/containers/image/v4/docker/lookaside.go delete mode 100644 vendor/github.com/containers/image/v4/docker/policyconfiguration/naming.go delete mode 100644 vendor/github.com/containers/image/v4/docker/reference/README.md delete mode 100644 vendor/github.com/containers/image/v4/docker/reference/helpers.go delete mode 100644 vendor/github.com/containers/image/v4/docker/reference/normalize.go delete mode 100644 vendor/github.com/containers/image/v4/docker/reference/reference.go delete mode 100644 vendor/github.com/containers/image/v4/docker/reference/regexp.go delete mode 100644 
vendor/github.com/containers/image/v4/docker/tarfile/dest.go delete mode 100644 vendor/github.com/containers/image/v4/docker/tarfile/doc.go delete mode 100644 vendor/github.com/containers/image/v4/docker/tarfile/src.go delete mode 100644 vendor/github.com/containers/image/v4/docker/tarfile/types.go delete mode 100644 vendor/github.com/containers/image/v4/docker/wwwauthenticate.go delete mode 100644 vendor/github.com/containers/image/v4/image/docker_list.go delete mode 100644 vendor/github.com/containers/image/v4/image/docker_schema1.go delete mode 100644 vendor/github.com/containers/image/v4/image/docker_schema2.go delete mode 100644 vendor/github.com/containers/image/v4/image/manifest.go delete mode 100644 vendor/github.com/containers/image/v4/image/memory.go delete mode 100644 vendor/github.com/containers/image/v4/image/oci.go delete mode 100644 vendor/github.com/containers/image/v4/image/sourced.go delete mode 100644 vendor/github.com/containers/image/v4/image/unparsed.go delete mode 100644 vendor/github.com/containers/image/v4/internal/pkg/keyctl/key.go delete mode 100644 vendor/github.com/containers/image/v4/internal/pkg/keyctl/keyring.go delete mode 100644 vendor/github.com/containers/image/v4/internal/pkg/keyctl/perm.go delete mode 100644 vendor/github.com/containers/image/v4/internal/pkg/keyctl/sys_linux.go delete mode 100644 vendor/github.com/containers/image/v4/internal/tmpdir/tmpdir.go delete mode 100644 vendor/github.com/containers/image/v4/manifest/docker_schema1.go delete mode 100644 vendor/github.com/containers/image/v4/manifest/docker_schema2.go delete mode 100644 vendor/github.com/containers/image/v4/manifest/manifest.go delete mode 100644 vendor/github.com/containers/image/v4/manifest/oci.go delete mode 100644 vendor/github.com/containers/image/v4/oci/archive/oci_dest.go delete mode 100644 vendor/github.com/containers/image/v4/oci/archive/oci_src.go delete mode 100644 vendor/github.com/containers/image/v4/oci/archive/oci_transport.go delete mode 100644 vendor/github.com/containers/image/v4/oci/internal/oci_util.go delete mode 100644 vendor/github.com/containers/image/v4/oci/layout/oci_dest.go delete mode 100644 vendor/github.com/containers/image/v4/oci/layout/oci_src.go delete mode 100644 vendor/github.com/containers/image/v4/oci/layout/oci_transport.go delete mode 100644 vendor/github.com/containers/image/v4/openshift/openshift-copies.go delete mode 100644 vendor/github.com/containers/image/v4/openshift/openshift.go delete mode 100644 vendor/github.com/containers/image/v4/openshift/openshift_transport.go delete mode 100644 vendor/github.com/containers/image/v4/ostree/ostree_dest.go delete mode 100644 vendor/github.com/containers/image/v4/ostree/ostree_src.go delete mode 100644 vendor/github.com/containers/image/v4/ostree/ostree_transport.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/blobinfocache/boltdb/boltdb.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/blobinfocache/default.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize/prioritize.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/blobinfocache/memory/memory.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/blobinfocache/none/none.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/compression/compression.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/compression/internal/types.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/compression/types/types.go delete 
mode 100644 vendor/github.com/containers/image/v4/pkg/compression/zstd.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/docker/config/config.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/docker/config/config_linux.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/docker/config/config_unsupported.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/strslice/README.md delete mode 100644 vendor/github.com/containers/image/v4/pkg/strslice/strslice.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/sysregistriesv2/system_registries_v2.go delete mode 100644 vendor/github.com/containers/image/v4/pkg/tlsclientconfig/tlsclientconfig.go delete mode 100644 vendor/github.com/containers/image/v4/signature/docker.go delete mode 100644 vendor/github.com/containers/image/v4/signature/json.go delete mode 100644 vendor/github.com/containers/image/v4/signature/mechanism.go delete mode 100644 vendor/github.com/containers/image/v4/signature/mechanism_gpgme.go delete mode 100644 vendor/github.com/containers/image/v4/signature/mechanism_openpgp.go delete mode 100644 vendor/github.com/containers/image/v4/signature/policy_config.go delete mode 100644 vendor/github.com/containers/image/v4/signature/policy_eval.go delete mode 100644 vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go delete mode 100644 vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go delete mode 100644 vendor/github.com/containers/image/v4/signature/policy_eval_simple.go delete mode 100644 vendor/github.com/containers/image/v4/signature/policy_reference_match.go delete mode 100644 vendor/github.com/containers/image/v4/signature/policy_types.go delete mode 100644 vendor/github.com/containers/image/v4/signature/signature.go delete mode 100644 vendor/github.com/containers/image/v4/storage/storage_image.go delete mode 100644 vendor/github.com/containers/image/v4/storage/storage_reference.go delete mode 100644 vendor/github.com/containers/image/v4/storage/storage_transport.go delete mode 100644 vendor/github.com/containers/image/v4/tarball/doc.go delete mode 100644 vendor/github.com/containers/image/v4/tarball/tarball_reference.go delete mode 100644 vendor/github.com/containers/image/v4/tarball/tarball_src.go delete mode 100644 vendor/github.com/containers/image/v4/tarball/tarball_transport.go delete mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/alltransports.go delete mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon.go delete mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon_stub.go delete mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/ostree.go delete mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/ostree_stub.go delete mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/storage.go delete mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/storage_stub.go delete mode 100644 vendor/github.com/containers/image/v4/transports/stub.go delete mode 100644 vendor/github.com/containers/image/v4/transports/transports.go delete mode 100644 vendor/github.com/containers/image/v4/types/types.go delete mode 100644 vendor/github.com/containers/image/v4/version/version.go create mode 100644 vendor/github.com/containers/image/v5/LICENSE create mode 100644 vendor/github.com/containers/image/v5/copy/copy.go create mode 100644 
vendor/github.com/containers/image/v5/copy/manifest.go create mode 100644 vendor/github.com/containers/image/v5/copy/progress_reader.go create mode 100644 vendor/github.com/containers/image/v5/copy/sign.go create mode 100644 vendor/github.com/containers/image/v5/directory/directory_dest.go create mode 100644 vendor/github.com/containers/image/v5/directory/directory_src.go create mode 100644 vendor/github.com/containers/image/v5/directory/directory_transport.go create mode 100644 vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go create mode 100644 vendor/github.com/containers/image/v5/docker/archive/dest.go create mode 100644 vendor/github.com/containers/image/v5/docker/archive/src.go create mode 100644 vendor/github.com/containers/image/v5/docker/archive/transport.go create mode 100644 vendor/github.com/containers/image/v5/docker/cache.go create mode 100644 vendor/github.com/containers/image/v5/docker/daemon/client.go create mode 100644 vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go create mode 100644 vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go create mode 100644 vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go create mode 100644 vendor/github.com/containers/image/v5/docker/docker_client.go create mode 100644 vendor/github.com/containers/image/v5/docker/docker_image.go create mode 100644 vendor/github.com/containers/image/v5/docker/docker_image_dest.go create mode 100644 vendor/github.com/containers/image/v5/docker/docker_image_src.go create mode 100644 vendor/github.com/containers/image/v5/docker/docker_transport.go create mode 100644 vendor/github.com/containers/image/v5/docker/errors.go create mode 100644 vendor/github.com/containers/image/v5/docker/lookaside.go create mode 100644 vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go create mode 100644 vendor/github.com/containers/image/v5/docker/reference/README.md create mode 100644 vendor/github.com/containers/image/v5/docker/reference/helpers.go create mode 100644 vendor/github.com/containers/image/v5/docker/reference/normalize.go create mode 100644 vendor/github.com/containers/image/v5/docker/reference/reference.go create mode 100644 vendor/github.com/containers/image/v5/docker/reference/regexp.go create mode 100644 vendor/github.com/containers/image/v5/docker/tarfile/dest.go create mode 100644 vendor/github.com/containers/image/v5/docker/tarfile/doc.go create mode 100644 vendor/github.com/containers/image/v5/docker/tarfile/src.go create mode 100644 vendor/github.com/containers/image/v5/docker/tarfile/types.go create mode 100644 vendor/github.com/containers/image/v5/docker/wwwauthenticate.go create mode 100644 vendor/github.com/containers/image/v5/image/docker_list.go create mode 100644 vendor/github.com/containers/image/v5/image/docker_schema1.go create mode 100644 vendor/github.com/containers/image/v5/image/docker_schema2.go create mode 100644 vendor/github.com/containers/image/v5/image/manifest.go create mode 100644 vendor/github.com/containers/image/v5/image/memory.go create mode 100644 vendor/github.com/containers/image/v5/image/oci.go create mode 100644 vendor/github.com/containers/image/v5/image/oci_index.go create mode 100644 vendor/github.com/containers/image/v5/image/sourced.go create mode 100644 vendor/github.com/containers/image/v5/image/unparsed.go create mode 100644 vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go create mode 100644 
vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go create mode 100644 vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go create mode 100644 vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go create mode 100644 vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go create mode 100644 vendor/github.com/containers/image/v5/manifest/docker_schema1.go create mode 100644 vendor/github.com/containers/image/v5/manifest/docker_schema2.go create mode 100644 vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go create mode 100644 vendor/github.com/containers/image/v5/manifest/list.go create mode 100644 vendor/github.com/containers/image/v5/manifest/manifest.go create mode 100644 vendor/github.com/containers/image/v5/manifest/oci.go create mode 100644 vendor/github.com/containers/image/v5/manifest/oci_index.go create mode 100644 vendor/github.com/containers/image/v5/oci/archive/oci_dest.go create mode 100644 vendor/github.com/containers/image/v5/oci/archive/oci_src.go create mode 100644 vendor/github.com/containers/image/v5/oci/archive/oci_transport.go create mode 100644 vendor/github.com/containers/image/v5/oci/internal/oci_util.go create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_dest.go create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_src.go create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_transport.go create mode 100644 vendor/github.com/containers/image/v5/openshift/openshift-copies.go create mode 100644 vendor/github.com/containers/image/v5/openshift/openshift.go create mode 100644 vendor/github.com/containers/image/v5/openshift/openshift_transport.go create mode 100644 vendor/github.com/containers/image/v5/ostree/ostree_dest.go create mode 100644 vendor/github.com/containers/image/v5/ostree/ostree_src.go create mode 100644 vendor/github.com/containers/image/v5/ostree/ostree_transport.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/compression.go create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/internal/types.go create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/types/types.go create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/zstd.go create mode 100644 vendor/github.com/containers/image/v5/pkg/docker/config/config.go create mode 100644 vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go create mode 100644 vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go create mode 100644 vendor/github.com/containers/image/v5/pkg/strslice/README.md create mode 100644 vendor/github.com/containers/image/v5/pkg/strslice/strslice.go create mode 100644 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go create mode 100644 vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go create mode 100644 vendor/github.com/containers/image/v5/signature/docker.go create mode 100644 
vendor/github.com/containers/image/v5/signature/json.go create mode 100644 vendor/github.com/containers/image/v5/signature/mechanism.go create mode 100644 vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go create mode 100644 vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_config.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_simple.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_reference_match.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_types.go create mode 100644 vendor/github.com/containers/image/v5/signature/signature.go create mode 100644 vendor/github.com/containers/image/v5/storage/storage_image.go create mode 100644 vendor/github.com/containers/image/v5/storage/storage_reference.go create mode 100644 vendor/github.com/containers/image/v5/storage/storage_transport.go create mode 100644 vendor/github.com/containers/image/v5/tarball/doc.go create mode 100644 vendor/github.com/containers/image/v5/tarball/tarball_reference.go create mode 100644 vendor/github.com/containers/image/v5/tarball/tarball_src.go create mode 100644 vendor/github.com/containers/image/v5/tarball/tarball_transport.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/ostree.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/storage.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go create mode 100644 vendor/github.com/containers/image/v5/transports/stub.go create mode 100644 vendor/github.com/containers/image/v5/transports/transports.go create mode 100644 vendor/github.com/containers/image/v5/types/types.go create mode 100644 vendor/github.com/containers/image/v5/version/version.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/README.md create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_other.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_other.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy.go create mode 100644 
vendor/github.com/docker/docker/pkg/archive/copy_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/example_changes.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/filesys.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unix.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/.gitattributes create mode 100644 vendor/github.com/fsouza/go-dockerclient/.golangci.yaml delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/term/term.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go delete mode 100644 vendor/github.com/ijc/Gotty/LICENSE delete mode 100644 vendor/github.com/ijc/Gotty/README delete mode 100644 vendor/github.com/ijc/Gotty/TODO delete mode 100644 vendor/github.com/ijc/Gotty/attributes.go delete mode 100644 vendor/github.com/ijc/Gotty/gotty.go delete mode 100644 vendor/github.com/ijc/Gotty/parser.go delete mode 100644 vendor/github.com/ijc/Gotty/types.go create mode 100644 vendor/github.com/morikuni/aec/LICENSE create mode 100644 vendor/github.com/morikuni/aec/README.md create mode 100644 vendor/github.com/morikuni/aec/aec.go create mode 100644 vendor/github.com/morikuni/aec/ansi.go create mode 100644 vendor/github.com/morikuni/aec/builder.go create mode 100644 vendor/github.com/morikuni/aec/sample.gif create mode 100644 vendor/github.com/morikuni/aec/sgr.go create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go
diff --git a/vendor/github.com/containerd/containerd/LICENSE
b/vendor/github.com/containerd/containerd/LICENSE new file mode 100644 index 000000000..584149b6e --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 000000000..8915f0277 --- /dev/null +++ b/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go new file mode 100644 index 000000000..b5200afc0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -0,0 +1,93 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package errdefs defines the common errors used throughout containerd +// packages. +// +// Use with errors.Wrap and errors.Wrapf to add context to an error. +// +// To detect an error class, use the IsXXX functions to tell whether an error +// is of a certain type. +// +// The functions ToGRPC and FromGRPC can be used to map server-side and +// client-side errors to the correct types. +package errdefs + +import ( + "context" + + "github.com/pkg/errors" +) + +// Definitions of common error types used throughout containerd. All containerd +// errors returned by most packages will map into one of these error classes. +// Packages should return errors of these types when they want to instruct a +// client to take a particular action. +// +// For the most part, we just try to provide local grpc errors. Most conditions +// map very well to those defined by grpc. +var ( + ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
+ ErrInvalidArgument = errors.New("invalid argument") + ErrNotFound = errors.New("not found") + ErrAlreadyExists = errors.New("already exists") + ErrFailedPrecondition = errors.New("failed precondition") + ErrUnavailable = errors.New("unavailable") + ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented +) + +// IsInvalidArgument returns true if the error is due to an invalid argument +func IsInvalidArgument(err error) bool { + return errors.Cause(err) == ErrInvalidArgument +} + +// IsNotFound returns true if the error is due to a missing object +func IsNotFound(err error) bool { + return errors.Cause(err) == ErrNotFound +} + +// IsAlreadyExists returns true if the error is due to an already existing +// metadata item +func IsAlreadyExists(err error) bool { + return errors.Cause(err) == ErrAlreadyExists +} + +// IsFailedPrecondition returns true if an operation could not proceed due to +// the lack of a particular condition +func IsFailedPrecondition(err error) bool { + return errors.Cause(err) == ErrFailedPrecondition +} + +// IsUnavailable returns true if the error is due to a resource being unavailable +func IsUnavailable(err error) bool { + return errors.Cause(err) == ErrUnavailable +} + +// IsNotImplemented returns true if the error is due to not being implemented +func IsNotImplemented(err error) bool { + return errors.Cause(err) == ErrNotImplemented +} + +// IsCanceled returns true if the error is due to `context.Canceled`. +func IsCanceled(err error) bool { + return errors.Cause(err) == context.Canceled +} + +// IsDeadlineExceeded returns true if the error is due to +// `context.DeadlineExceeded`. +func IsDeadlineExceeded(err error) bool { + return errors.Cause(err) == context.DeadlineExceeded +} diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go new file mode 100644 index 000000000..209f63bd0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import ( + "context" + "strings" + + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ToGRPC will attempt to map the backend containerd error into a grpc error, +// using the original error message as a description. +// +// Further information may be extracted from certain errors depending on their +// type. +// +// If the error is unmapped, the original error will be returned to be handled +// by the regular grpc error handling stack.
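+//
+// A minimal usage sketch (the image name and the wrapping shown here are
+// illustrative, not part of this package): a server maps a typed error onto
+// a grpc status, and a client recovers the error class on the other side.
+//
+//	// server side
+//	return errdefs.ToGRPC(errors.Wrapf(errdefs.ErrNotFound, "image %q", name))
+//
+//	// client side
+//	if errdefs.IsNotFound(errdefs.FromGRPC(err)) {
+//		// handle the missing image
+//	}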
+func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Errorf(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Errorf(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Errorf(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Errorf(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Errorf(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. +// +// This is equivalent to ToGRPC(errors.Wrapf(err, format, args...)) +func ToGRPCf(err error, format string, args ...interface{}) error { + return ToGRPC(errors.Wrapf(err, format, args...)) +} + +// FromGRPC returns the underlying error from a grpc service based on the grpc error code +func FromGRPC(err error) error { + if err == nil { + return nil + } + + var cls error // divide these into error classes, becomes the cause + + switch code(err) { + case codes.InvalidArgument: + cls = ErrInvalidArgument + case codes.AlreadyExists: + cls = ErrAlreadyExists + case codes.NotFound: + cls = ErrNotFound + case codes.Unavailable: + cls = ErrUnavailable + case codes.FailedPrecondition: + cls = ErrFailedPrecondition + case codes.Unimplemented: + cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded + default: + cls = ErrUnknown + } + + msg := rebaseMessage(cls, err) + if msg != "" { + err = errors.Wrap(cls, msg) + } else { + err = errors.WithStack(cls) + } + + return err +} + +// rebaseMessage removes the repeated error message at the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there. +func rebaseMessage(cls error, err error) string { + desc := errDesc(err) + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} + +func isGRPCError(err error) bool { + _, ok := status.FromError(err) + return ok +} + +func code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +func errDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/containerd/continuity/fs/copy.go b/vendor/github.com/containerd/continuity/fs/copy.go new file mode 100644 index 000000000..ad61022ad --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy.go @@ -0,0 +1,172 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/pkg/errors" +) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32*1024) + return &buffer + }, +} + +// XAttrErrorHandler transforms a non-nil xattr error. +// Return nil to ignore an error. +// xattrKey can be empty for a listxattr operation. +type XAttrErrorHandler func(dst, src, xattrKey string, err error) error + +type copyDirOpts struct { + xeh XAttrErrorHandler +} + +// CopyDirOpt is an option for CopyDir. +type CopyDirOpt func(*copyDirOpts) error + +// WithXAttrErrorHandler allows specifying an XAttrErrorHandler. +// If a nil XAttrErrorHandler is specified (the default), CopyDir stops +// on a non-nil xattr error. +func WithXAttrErrorHandler(xeh XAttrErrorHandler) CopyDirOpt { + return func(o *copyDirOpts) error { + o.xeh = xeh + return nil + } +} + +// WithAllowXAttrErrors allows ignoring xattr errors. +func WithAllowXAttrErrors() CopyDirOpt { + xeh := func(dst, src, xattrKey string, err error) error { + return nil + } + return WithXAttrErrorHandler(xeh) +} + +// CopyDir copies the directory from src to dst. +// The most efficient copy of files is attempted. +func CopyDir(dst, src string, opts ...CopyDirOpt) error { + var o copyDirOpts + for _, opt := range opts { + if err := opt(&o); err != nil { + return err + } + } + inodes := map[uint64]string{} + return copyDirectory(dst, src, inodes, &o) +} + +func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) error { + stat, err := os.Stat(src) + if err != nil { + return errors.Wrapf(err, "failed to stat %s", src) + } + if !stat.IsDir() { + return errors.Errorf("source is not a directory") + } + + if st, err := os.Stat(dst); err != nil { + if err := os.Mkdir(dst, stat.Mode()); err != nil { + return errors.Wrapf(err, "failed to mkdir %s", dst) + } + } else if !st.IsDir() { + return errors.Errorf("cannot copy to non-directory: %s", dst) + } else { + if err := os.Chmod(dst, stat.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod on %s", dst) + } + } + + fis, err := ioutil.ReadDir(src) + if err != nil { + return errors.Wrapf(err, "failed to read %s", src) + } + + if err := copyFileInfo(stat, dst); err != nil { + return errors.Wrapf(err, "failed to copy file info for %s", dst) + } + + for _, fi := range fis { + source := filepath.Join(src, fi.Name()) + target := filepath.Join(dst, fi.Name()) + + switch { + case fi.IsDir(): + if err := copyDirectory(target, source, inodes, o); err != nil { + return err + } + continue + case (fi.Mode() & os.ModeType) == 0: + link, err := getLinkSource(target, fi, inodes) + if err != nil { + return errors.Wrap(err, "failed to get hardlink") + } + if link != "" { + if err := os.Link(link, target); err != nil { + return errors.Wrap(err, "failed to create hard link") + } + } else if err := CopyFile(target, source); err != nil { + return errors.Wrap(err, "failed to copy files") + } + case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink: + link, err := os.Readlink(source) + if err != nil { + return errors.Wrapf(err, "failed to read link: %s", source) + } + if err := os.Symlink(link, target); err != nil {
return errors.Wrapf(err, "failed to create symlink: %s", target) + } + case (fi.Mode() & os.ModeDevice) == os.ModeDevice: + if err := copyDevice(target, fi); err != nil { + return errors.Wrapf(err, "failed to create device") + } + default: + // TODO: Support pipes and sockets + return errors.Errorf("unsupported mode %s", fi.Mode()) + } + if err := copyFileInfo(fi, target); err != nil { + return errors.Wrap(err, "failed to copy file info") + } + + if err := copyXAttrs(target, source, o.xeh); err != nil { + return errors.Wrap(err, "failed to copy xattrs") + } + } + + return nil +} + +// CopyFile copies the source file to the target. +// The most efficient means of copying is used for the platform. +func CopyFile(target, source string) error { + src, err := os.Open(source) + if err != nil { + return errors.Wrapf(err, "failed to open source %s", source) + } + defer src.Close() + tgt, err := os.Create(target) + if err != nil { + return errors.Wrapf(err, "failed to open target %s", target) + } + defer tgt.Close() + + return copyFileContent(tgt, src) +} diff --git a/vendor/github.com/containerd/continuity/fs/copy_linux.go b/vendor/github.com/containerd/continuity/fs/copy_linux.go new file mode 100644 index 000000000..81c71522a --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy_linux.go @@ -0,0 +1,144 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "io" + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func copyFileInfo(fi os.FileInfo, name string) error { + st := fi.Sys().(*syscall.Stat_t) + if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil { + if os.IsPermission(err) { + // Normally if uid/gid are the same this would be a no-op, but some + // filesystems may still return EPERM... for instance NFS does this. + // In such a case, this is not an error.
+ if dstStat, err2 := os.Lstat(name); err2 == nil { + st2 := dstStat.Sys().(*syscall.Stat_t) + if st.Uid == st2.Uid && st.Gid == st2.Gid { + err = nil + } + } + } + if err != nil { + return errors.Wrapf(err, "failed to chown %s", name) + } + } + + if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + } + + timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } + + return nil +} + +const maxSSizeT = int64(^uint(0) >> 1) + +func copyFileContent(dst, src *os.File) error { + st, err := src.Stat() + if err != nil { + return errors.Wrap(err, "unable to stat source") + } + + size := st.Size() + first := true + srcFd := int(src.Fd()) + dstFd := int(dst.Fd()) + + for size > 0 { + // Ensure that we are never trying to copy more than SSIZE_MAX at a + // time and at the same time avoid overflows when the file is larger + // than 4GB on 32-bit systems. + var copySize int + if size > maxSSizeT { + copySize = int(maxSSizeT) + } else { + copySize = int(size) + } + n, err := unix.CopyFileRange(srcFd, nil, dstFd, nil, copySize, 0) + if err != nil { + if (err != unix.ENOSYS && err != unix.EXDEV) || !first { + return errors.Wrap(err, "copy file range failed") + } + + buf := bufferPool.Get().(*[]byte) + _, err = io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + return errors.Wrap(err, "userspace copy failed") + } + + first = false + size -= int64(n) + } + + return nil +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + e := errors.Wrapf(err, "failed to list xattrs on %s", src) + if xeh != nil { + e = xeh(dst, src, "", e) + } + return e + } + for _, xattr := range xattrKeys { + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { + e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + } + + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/vendor/github.com/containerd/continuity/fs/copy_unix.go b/vendor/github.com/containerd/continuity/fs/copy_unix.go new file mode 100644 index 000000000..73c01a46d --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy_unix.go @@ -0,0 +1,112 @@ +// +build solaris darwin freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +package fs + +import ( + "io" + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func copyFileInfo(fi os.FileInfo, name string) error { + st := fi.Sys().(*syscall.Stat_t) + if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil { + if os.IsPermission(err) { + // Normally if uid/gid are the same this would be a no-op, but some + // filesystems may still return EPERM... for instance NFS does this. + // In such a case, this is not an error. + if dstStat, err2 := os.Lstat(name); err2 == nil { + st2 := dstStat.Sys().(*syscall.Stat_t) + if st.Uid == st2.Uid && st.Gid == st2.Gid { + err = nil + } + } + } + if err != nil { + return errors.Wrapf(err, "failed to chown %s", name) + } + } + + if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + } + + timespec := []syscall.Timespec{StatAtime(st), StatMtime(st)} + if err := syscall.UtimesNano(name, timespec); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } + + return nil +} + +func copyFileContent(dst, src *os.File) error { + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + + return err +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + e := errors.Wrapf(err, "failed to list xattrs on %s", src) + if xeh != nil { + e = xeh(dst, src, "", e) + } + return e + } + for _, xattr := range xattrKeys { + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { + e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + } + + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/vendor/github.com/containerd/continuity/fs/copy_windows.go b/vendor/github.com/containerd/continuity/fs/copy_windows.go new file mode 100644 index 000000000..27c7d7dbb --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy_windows.go @@ -0,0 +1,49 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package fs + +import ( + "io" + "os" + + "github.com/pkg/errors" +) + +func copyFileInfo(fi os.FileInfo, name string) error { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + + // TODO: copy windows specific metadata + + return nil +} + +func copyFileContent(dst, src *os.File) error { + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + return err +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + return errors.New("device copy not supported") +} diff --git a/vendor/github.com/containerd/continuity/fs/diff.go b/vendor/github.com/containerd/continuity/fs/diff.go new file mode 100644 index 000000000..e64f9e73d --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/diff.go @@ -0,0 +1,326 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "context" + "os" + "path/filepath" + "strings" + + "golang.org/x/sync/errgroup" + + "github.com/sirupsen/logrus" +) + +// ChangeKind is the type of modification that +// a change is making. +type ChangeKind int + +const ( + // ChangeKindUnmodified represents an unmodified + // file + ChangeKindUnmodified = iota + + // ChangeKindAdd represents an addition of + // a file + ChangeKindAdd + + // ChangeKindModify represents a change to + // an existing file + ChangeKindModify + + // ChangeKindDelete represents a delete of + // a file + ChangeKindDelete +) + +func (k ChangeKind) String() string { + switch k { + case ChangeKindUnmodified: + return "unmodified" + case ChangeKindAdd: + return "add" + case ChangeKindModify: + return "modify" + case ChangeKindDelete: + return "delete" + default: + return "" + } +} + +// Change represents a single change between a diff and its parent. +type Change struct { + Kind ChangeKind + Path string +} + +// ChangeFunc is the type of function called for each change +// computed during a directory changes calculation. +type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error + +// Changes computes changes between two directories calling the +// given change function for each computed change. The first +// directory is intended to be the base directory and the second +// directory the changed directory. +// +// The change callback is called in the order of path names and +// should be applicable in that order. +// Due to this apply ordering, the following is true +// - Removed directory trees only create a single change for the root +// directory removed. Remaining changes are implied. +// - A directory which is modified to become a file will not have +// delete entries for sub-path items, their removal is implied +// by the removal of the parent directory. +// +// Opaque directories will not be treated specially and each file +// removed from the base directory will show up as a removal.
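+//
+// A minimal caller sketch (the printing callback is illustrative only,
+// not part of this package):
+//
+//	err := fs.Changes(ctx, lower, upper,
+//		func(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
+//			if err != nil {
+//				return err
+//			}
+//			fmt.Println(k, p)
+//			return nil
+//		})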
+// +// File content comparisons will be done on files which have timestamps +// which may have been truncated. If either of the files being compared +// has a zero value nanosecond value, each byte will be compared for +// differences. If 2 files have the same seconds value but different +// nanosecond values where one of those values is zero, the files will +// be considered unchanged if the content is the same. This behavior +// is to account for timestamp truncation during archiving. +func Changes(ctx context.Context, a, b string, changeFn ChangeFunc) error { + if a == "" { + logrus.Debugf("Using single walk diff for %s", b) + return addDirChanges(ctx, changeFn, b) + } else if diffOptions := detectDirDiff(b, a); diffOptions != nil { + logrus.Debugf("Using single walk diff for %s from %s", diffOptions.diffDir, a) + return diffDirChanges(ctx, changeFn, a, diffOptions) + } + + logrus.Debugf("Using double walk diff for %s from %s", b, a) + return doubleWalkDiff(ctx, changeFn, a, b) +} + +func addDirChanges(ctx context.Context, changeFn ChangeFunc, root string) error { + return filepath.Walk(root, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(root, path) + if err != nil { + return err + } + + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + return changeFn(ChangeKindAdd, path, f, nil) + }) +} + +// diffDirOptions is used when the diff can be directly calculated from +// a diff directory to its base, without walking both trees. +type diffDirOptions struct { + diffDir string + skipChange func(string) (bool, error) + deleteChange func(string, string, os.FileInfo) (string, error) +} + +// diffDirChanges walks the diff directory and compares changes against the base. +func diffDirChanges(ctx context.Context, changeFn ChangeFunc, base string, o *diffDirOptions) error { + changedDirs := make(map[string]struct{}) + return filepath.Walk(o.diffDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(o.diffDir, path) + if err != nil { + return err + } + + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + // TODO: handle opaqueness, start new double walker at this + // location to get deletes, and skip tree in single walker + + if o.skipChange != nil { + if skip, err := o.skipChange(path); skip { + return err + } + } + + var kind ChangeKind + + deletedFile, err := o.deleteChange(o.diffDir, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + path = deletedFile + kind = ChangeKindDelete + f = nil + } else { + // Otherwise, the file was added + kind = ChangeKindAdd + + // ...Unless it already existed in a base, in which case, it's a modification + stat, err := os.Stat(filepath.Join(base, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the base, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. 
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + kind = ChangeKindModify + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if kind == ChangeKindAdd || kind == ChangeKindDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + pi, err := os.Stat(filepath.Join(o.diffDir, parent)) + if err := changeFn(ChangeKindModify, parent, pi, err); err != nil { + return err + } + changedDirs[parent] = struct{}{} + } + } + + return changeFn(kind, path, f, nil) + }) +} + +// doubleWalkDiff walks both directories to create a diff +func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err error) { + g, ctx := errgroup.WithContext(ctx) + + var ( + c1 = make(chan *currentPath) + c2 = make(chan *currentPath) + + f1, f2 *currentPath + rmdir string + ) + g.Go(func() error { + defer close(c1) + return pathWalk(ctx, a, c1) + }) + g.Go(func() error { + defer close(c2) + return pathWalk(ctx, b, c2) + }) + g.Go(func() error { + for c1 != nil || c2 != nil { + if f1 == nil && c1 != nil { + f1, err = nextPath(ctx, c1) + if err != nil { + return err + } + if f1 == nil { + c1 = nil + } + } + + if f2 == nil && c2 != nil { + f2, err = nextPath(ctx, c2) + if err != nil { + return err + } + if f2 == nil { + c2 = nil + } + } + if f1 == nil && f2 == nil { + continue + } + + var f os.FileInfo + k, p := pathChange(f1, f2) + switch k { + case ChangeKindAdd: + if rmdir != "" { + rmdir = "" + } + f = f2.f + f2 = nil + case ChangeKindDelete: + // Check if this file is already removed by being + // under a removed directory + if rmdir != "" && strings.HasPrefix(f1.path, rmdir) { + f1 = nil + continue + } else if f1.f.IsDir() { + rmdir = f1.path + string(os.PathSeparator) + } else if rmdir != "" { + rmdir = "" + } + f1 = nil + case ChangeKindModify: + same, err := sameFile(f1, f2) + if err != nil { + return err + } + if f1.f.IsDir() && !f2.f.IsDir() { + rmdir = f1.path + string(os.PathSeparator) + } else if rmdir != "" { + rmdir = "" + } + f = f2.f + f1 = nil + f2 = nil + if same { + if !isLinked(f) { + continue + } + k = ChangeKindUnmodified + } + } + if err := changeFn(k, p, f, nil); err != nil { + return err + } + } + return nil + }) + + return g.Wait() +} diff --git a/vendor/github.com/containerd/continuity/fs/diff_unix.go b/vendor/github.com/containerd/continuity/fs/diff_unix.go new file mode 100644 index 000000000..7913af27d --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/diff_unix.go @@ -0,0 +1,74 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "bytes" + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" +) + +// detectDirDiff returns diff dir options if a directory could +// be found in the mount info for upper, which is the direct +// diff with the provided lower directory +func detectDirDiff(upper, lower string) *diffDirOptions { + // TODO: get mount options for upper + // TODO: detect AUFS + // TODO: detect overlay + return nil +} + +// compareSysStat returns whether the stats are equivalent (that is, +// whether the files can be considered the same file) and an error +func compareSysStat(s1, s2 interface{}) (bool, error) { + ls1, ok := s1.(*syscall.Stat_t) + if !ok { + return false, nil + } + ls2, ok := s2.(*syscall.Stat_t) + if !ok { + return false, nil + } + + return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil +} + +func compareCapabilities(p1, p2 string) (bool, error) { + c1, err := sysx.LGetxattr(p1, "security.capability") + if err != nil && err != sysx.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p1) + } + c2, err := sysx.LGetxattr(p2, "security.capability") + if err != nil && err != sysx.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p2) + } + return bytes.Equal(c1, c2), nil +} + +func isLinked(f os.FileInfo) bool { + s, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return false + } + return !f.IsDir() && s.Nlink > 1 +} diff --git a/vendor/github.com/containerd/continuity/fs/diff_windows.go b/vendor/github.com/containerd/continuity/fs/diff_windows.go new file mode 100644 index 000000000..4bfa72d3a --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/diff_windows.go @@ -0,0 +1,48 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +package fs + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func detectDirDiff(upper, lower string) *diffDirOptions { + return nil +} + +func compareSysStat(s1, s2 interface{}) (bool, error) { + f1, ok := s1.(windows.Win32FileAttributeData) + if !ok { + return false, nil + } + f2, ok := s2.(windows.Win32FileAttributeData) + if !ok { + return false, nil + } + return f1.FileAttributes == f2.FileAttributes, nil +} + +func compareCapabilities(p1, p2 string) (bool, error) { + // TODO: Use windows equivalent + return true, nil +} + +func isLinked(os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/containerd/continuity/fs/dtype_linux.go b/vendor/github.com/containerd/continuity/fs/dtype_linux.go new file mode 100644 index 000000000..10510d8de --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/dtype_linux.go @@ -0,0 +1,103 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + err = dummyFile.Close() + return name, err +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *syscall.Dirent) bool { + visited++ + if ent.Type == syscall.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := syscall.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/vendor/github.com/containerd/continuity/fs/du.go b/vendor/github.com/containerd/continuity/fs/du.go new file mode 100644 index 000000000..fccc985dc --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/du.go @@ -0,0 +1,38 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import "context" + +// Usage describes inode and disk usage information +type Usage struct { + Inodes int64 + Size int64 +} + +// DiskUsage counts the number of inodes and disk usage for the resources under +// the given roots. +func DiskUsage(ctx context.Context, roots ...string) (Usage, error) { + return diskUsage(ctx, roots...) +} + +// DiffUsage counts the number of inodes and the disk usage in the +// diff between the two directories. The first path is intended +// as the base directory and the second as the changed directory. +func DiffUsage(ctx context.Context, a, b string) (Usage, error) { + return diffUsage(ctx, a, b) +} diff --git a/vendor/github.com/containerd/continuity/fs/du_unix.go b/vendor/github.com/containerd/continuity/fs/du_unix.go new file mode 100644 index 000000000..e22ffbea3 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/du_unix.go @@ -0,0 +1,110 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "context" + "os" + "path/filepath" + "syscall" +) + +type inode struct { + // TODO(stevvooe): Can probably reduce memory usage by not tracking + // device, but we can leave this here for now. + dev, ino uint64 +} + +func newInode(stat *syscall.Stat_t) inode { + return inode{ + // Dev is uint32 on darwin/bsd, uint64 on linux/solaris + dev: uint64(stat.Dev), // nolint: unconvert + // Ino is uint32 on bsd, uint64 on darwin/linux/solaris + ino: uint64(stat.Ino), // nolint: unconvert + } +} + +func diskUsage(ctx context.Context, roots ...string) (Usage, error) { + + var ( + size int64 + inodes = map[inode]struct{}{} // expensive! + ) + + for _, root := range roots { + if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + inoKey := newInode(fi.Sys().(*syscall.Stat_t)) + if _, ok := inodes[inoKey]; !ok { + inodes[inoKey] = struct{}{} + size += fi.Size() + } + + return nil + }); err != nil { + return Usage{}, err + } + } + + return Usage{ + Inodes: int64(len(inodes)), + Size: size, + }, nil +} + +func diffUsage(ctx context.Context, a, b string) (Usage, error) { + var ( + size int64 + inodes = map[inode]struct{}{} // expensive!
+ ) + + if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if kind == ChangeKindAdd || kind == ChangeKindModify { + inoKey := newInode(fi.Sys().(*syscall.Stat_t)) + if _, ok := inodes[inoKey]; !ok { + inodes[inoKey] = struct{}{} + size += fi.Size() + } + + return nil + + } + return nil + }); err != nil { + return Usage{}, err + } + + return Usage{ + Inodes: int64(len(inodes)), + Size: size, + }, nil +} diff --git a/vendor/github.com/containerd/continuity/fs/du_windows.go b/vendor/github.com/containerd/continuity/fs/du_windows.go new file mode 100644 index 000000000..8f25ec59c --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/du_windows.go @@ -0,0 +1,82 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "context" + "os" + "path/filepath" +) + +func diskUsage(ctx context.Context, roots ...string) (Usage, error) { + var ( + size int64 + ) + + // TODO(stevvooe): Support inodes (or equivalent) for windows. + + for _, root := range roots { + if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + size += fi.Size() + return nil + }); err != nil { + return Usage{}, err + } + } + + return Usage{ + Size: size, + }, nil +} + +func diffUsage(ctx context.Context, a, b string) (Usage, error) { + var ( + size int64 + ) + + if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if kind == ChangeKindAdd || kind == ChangeKindModify { + size += fi.Size() + + return nil + + } + return nil + }); err != nil { + return Usage{}, err + } + + return Usage{ + Size: size, + }, nil +} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink.go b/vendor/github.com/containerd/continuity/fs/hardlink.go new file mode 100644 index 000000000..762aa45e6 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/hardlink.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import "os" + +// GetLinkInfo returns an identifier representing the node a hardlink is pointing +// to. If the file is not hard linked then 0 will be returned. 
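+//
+// A small illustrative sketch (the groups map is hypothetical, not part of
+// this package): collecting paths that share an inode finds hardlink sets.
+//
+//	if ino, isHardlink := fs.GetLinkInfo(fi); isHardlink {
+//		groups[ino] = append(groups[ino], path)
+//	}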
+func GetLinkInfo(fi os.FileInfo) (uint64, bool) { + return getLinkInfo(fi) +} + +// getLinkSource returns the path of a previously seen file that shares an +// inode with the given name and file info, looked up in the provided inode +// map. If the file's inode is not yet in the map and the file has other +// links, the name is added to the map as the source for other link +// locations. +func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) { + inode, isHardlink := getLinkInfo(fi) + if !isHardlink { + return "", nil + } + + path, ok := inodes[inode] + if !ok { + inodes[inode] = name + } + return path, nil +} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink_unix.go b/vendor/github.com/containerd/continuity/fs/hardlink_unix.go new file mode 100644 index 000000000..f95f0904c --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/hardlink_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "os" + "syscall" +) + +func getLinkInfo(fi os.FileInfo) (uint64, bool) { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return 0, false + } + + // Ino is uint32 on bsd, uint64 on darwin/linux/solaris + return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 // nolint: unconvert +} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink_windows.go b/vendor/github.com/containerd/continuity/fs/hardlink_windows.go new file mode 100644 index 000000000..748554714 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/hardlink_windows.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import "os" + +func getLinkInfo(fi os.FileInfo) (uint64, bool) { + return 0, false +} diff --git a/vendor/github.com/containerd/continuity/fs/path.go b/vendor/github.com/containerd/continuity/fs/path.go new file mode 100644 index 000000000..8863caa9d --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/path.go @@ -0,0 +1,313 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "bytes" + "context" + "io" + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +var ( + errTooManyLinks = errors.New("too many links") +) + +type currentPath struct { + path string + f os.FileInfo + fullPath string +} + +func pathChange(lower, upper *currentPath) (ChangeKind, string) { + if lower == nil { + if upper == nil { + panic("cannot compare nil paths") + } + return ChangeKindAdd, upper.path + } + if upper == nil { + return ChangeKindDelete, lower.path + } + + switch i := directoryCompare(lower.path, upper.path); { + case i < 0: + // File in lower that is not in upper + return ChangeKindDelete, lower.path + case i > 0: + // File in upper that is not in lower + return ChangeKindAdd, upper.path + default: + return ChangeKindModify, upper.path + } +} + +func directoryCompare(a, b string) int { + l := len(a) + if len(b) < l { + l = len(b) + } + for i := 0; i < l; i++ { + c1, c2 := a[i], b[i] + if c1 == filepath.Separator { + c1 = byte(0) + } + if c2 == filepath.Separator { + c2 = byte(0) + } + if c1 < c2 { + return -1 + } + if c1 > c2 { + return +1 + } + } + if len(a) < len(b) { + return -1 + } + if len(a) > len(b) { + return +1 + } + return 0 +} + +func sameFile(f1, f2 *currentPath) (bool, error) { + if os.SameFile(f1.f, f2.f) { + return true, nil + } + + equalStat, err := compareSysStat(f1.f.Sys(), f2.f.Sys()) + if err != nil || !equalStat { + return equalStat, err + } + + if eq, err := compareCapabilities(f1.fullPath, f2.fullPath); err != nil || !eq { + return eq, err + } + + // If not a directory also check size, modtime, and content + if !f1.f.IsDir() { + if f1.f.Size() != f2.f.Size() { + return false, nil + } + t1 := f1.f.ModTime() + t2 := f2.f.ModTime() + + if t1.Unix() != t2.Unix() { + return false, nil + } + + // If the timestamp may have been truncated in both of the + // files, check the content of the files to determine whether they differ + if t1.Nanosecond() == 0 && t2.Nanosecond() == 0 { + var eq bool + if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink { + eq, err = compareSymlinkTarget(f1.fullPath, f2.fullPath) + } else if f1.f.Size() > 0 { + eq, err = compareFileContent(f1.fullPath, f2.fullPath) + } + if err != nil || !eq { + return eq, err + } + } else if t1.Nanosecond() != t2.Nanosecond() { + return false, nil + } + } + + return true, nil +} + +func compareSymlinkTarget(p1, p2 string) (bool, error) { + t1, err := os.Readlink(p1) + if err != nil { + return false, err + } + t2, err := os.Readlink(p2) + if err != nil { + return false, err + } + return t1 == t2, nil +} + +const compareChuckSize = 32 * 1024 + +// compareFileContent compares the content of two same-sized files +// by comparing each byte.
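+// The files are read side by side in compareChuckSize chunks; the first
+// mismatched chunk (or a mismatched read length) ends the comparison.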
+func compareFileContent(p1, p2 string) (bool, error) { + f1, err := os.Open(p1) + if err != nil { + return false, err + } + defer f1.Close() + f2, err := os.Open(p2) + if err != nil { + return false, err + } + defer f2.Close() + + b1 := make([]byte, compareChuckSize) + b2 := make([]byte, compareChuckSize) + for { + n1, err1 := f1.Read(b1) + if err1 != nil && err1 != io.EOF { + return false, err1 + } + n2, err2 := f2.Read(b2) + if err2 != nil && err2 != io.EOF { + return false, err2 + } + if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) { + return false, nil + } + if err1 == io.EOF && err2 == io.EOF { + return true, nil + } + } +} + +func pathWalk(ctx context.Context, root string, pathC chan<- *currentPath) error { + return filepath.Walk(root, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(root, path) + if err != nil { + return err + } + + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + p := ¤tPath{ + path: path, + f: f, + fullPath: filepath.Join(root, path), + } + + select { + case <-ctx.Done(): + return ctx.Err() + case pathC <- p: + return nil + } + }) +} + +func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case p := <-pathC: + return p, nil + } +} + +// RootPath joins a path with a root, evaluating and bounding any +// symlink to the root directory. +func RootPath(root, path string) (string, error) { + if path == "" { + return root, nil + } + var linksWalked int // to protect against cycles + for { + i := linksWalked + newpath, err := walkLinks(root, path, &linksWalked) + if err != nil { + return "", err + } + path = newpath + if i == linksWalked { + newpath = filepath.Join("/", newpath) + if path == newpath { + return filepath.Join(root, newpath), nil + } + path = newpath + } + } +} + +func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) { + if *linksWalked > 255 { + return "", false, errTooManyLinks + } + + path = filepath.Join("/", path) + if path == "/" { + return path, false, nil + } + realPath := filepath.Join(root, path) + + fi, err := os.Lstat(realPath) + if err != nil { + // If path does not yet exist, treat as non-symlink + if os.IsNotExist(err) { + return path, false, nil + } + return "", false, err + } + if fi.Mode()&os.ModeSymlink == 0 { + return path, false, nil + } + newpath, err = os.Readlink(realPath) + if err != nil { + return "", false, err + } + *linksWalked++ + return newpath, true, nil +} + +func walkLinks(root, path string, linksWalked *int) (string, error) { + switch dir, file := filepath.Split(path); { + case dir == "": + newpath, _, err := walkLink(root, file, linksWalked) + return newpath, err + case file == "": + if os.IsPathSeparator(dir[len(dir)-1]) { + if dir == "/" { + return dir, nil + } + return walkLinks(root, dir[:len(dir)-1], linksWalked) + } + newpath, _, err := walkLink(root, dir, linksWalked) + return newpath, err + default: + newdir, err := walkLinks(root, dir, linksWalked) + if err != nil { + return "", err + } + newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked) + if err != nil { + return "", err + } + if !islink { + return newpath, nil + } + if filepath.IsAbs(newpath) { + return newpath, nil + } + return filepath.Join(newdir, newpath), nil + } +} diff --git a/vendor/github.com/containerd/continuity/fs/stat_bsd.go 
b/vendor/github.com/containerd/continuity/fs/stat_bsd.go new file mode 100644 index 000000000..cb7400a33 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/stat_bsd.go @@ -0,0 +1,44 @@ +// +build darwin freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "syscall" + "time" +) + +// StatAtime returns the access time from a stat struct +func StatAtime(st *syscall.Stat_t) syscall.Timespec { + return st.Atimespec +} + +// StatCtime returns the status change time from a stat struct +func StatCtime(st *syscall.Stat_t) syscall.Timespec { + return st.Ctimespec +} + +// StatMtime returns the modified time from a stat struct +func StatMtime(st *syscall.Stat_t) syscall.Timespec { + return st.Mtimespec +} + +// StatATimeAsTime returns the access time as a time.Time +func StatATimeAsTime(st *syscall.Stat_t) time.Time { + return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert +} diff --git a/vendor/github.com/containerd/continuity/fs/stat_linux.go b/vendor/github.com/containerd/continuity/fs/stat_linux.go new file mode 100644 index 000000000..4a678dd1f --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/stat_linux.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "syscall" + "time" +) + +// StatAtime returns the Atim +func StatAtime(st *syscall.Stat_t) syscall.Timespec { + return st.Atim +} + +// StatCtime returns the Ctim +func StatCtime(st *syscall.Stat_t) syscall.Timespec { + return st.Ctim +} + +// StatMtime returns the Mtim +func StatMtime(st *syscall.Stat_t) syscall.Timespec { + return st.Mtim +} + +// StatATimeAsTime returns st.Atim as a time.Time +func StatATimeAsTime(st *syscall.Stat_t) time.Time { + // The int64 conversions ensure the line compiles for 32-bit systems as well. + return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert +} diff --git a/vendor/github.com/containerd/continuity/fs/time.go b/vendor/github.com/containerd/continuity/fs/time.go new file mode 100644 index 000000000..cde456123 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/time.go @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import "time" + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files. We handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} diff --git a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go deleted file mode 100644 index b0d5a6b56..000000000 --- a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go +++ /dev/null @@ -1,101 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pathdriver - -import ( - "path/filepath" -) - -// PathDriver provides all of the path manipulation functions in a common -// interface. The context should call these and never use the `filepath` -// package or any other package to manipulate paths. -type PathDriver interface { - Join(paths ...string) string - IsAbs(path string) bool - Rel(base, target string) (string, error) - Base(path string) string - Dir(path string) string - Clean(path string) string - Split(path string) (dir, file string) - Separator() byte - Abs(path string) (string, error) - Walk(string, filepath.WalkFunc) error - FromSlash(path string) string - ToSlash(path string) string - Match(pattern, name string) (matched bool, err error) -} - -// pathDriver is a simple default implementation calls the filepath package. -type pathDriver struct{} - -// LocalPathDriver is the exported pathDriver struct for convenience. -var LocalPathDriver PathDriver = &pathDriver{} - -func (*pathDriver) Join(paths ...string) string { - return filepath.Join(paths...)
-} - -func (*pathDriver) IsAbs(path string) bool { - return filepath.IsAbs(path) -} - -func (*pathDriver) Rel(base, target string) (string, error) { - return filepath.Rel(base, target) -} - -func (*pathDriver) Base(path string) string { - return filepath.Base(path) -} - -func (*pathDriver) Dir(path string) string { - return filepath.Dir(path) -} - -func (*pathDriver) Clean(path string) string { - return filepath.Clean(path) -} - -func (*pathDriver) Split(path string) (dir, file string) { - return filepath.Split(path) -} - -func (*pathDriver) Separator() byte { - return filepath.Separator -} - -func (*pathDriver) Abs(path string) (string, error) { - return filepath.Abs(path) -} - -// Note that filepath.Walk calls os.Stat, so if the context wants to -// to call Driver.Stat() for Walk, they need to create a new struct that -// overrides this method. -func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error { - return filepath.Walk(root, walkFn) -} - -func (*pathDriver) FromSlash(path string) string { - return filepath.FromSlash(path) -} - -func (*pathDriver) ToSlash(path string) string { - return filepath.ToSlash(path) -} - -func (*pathDriver) Match(pattern, name string) (bool, error) { - return filepath.Match(pattern, name) -} diff --git a/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go b/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go new file mode 100644 index 000000000..0bfa6a040 --- /dev/null +++ b/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go @@ -0,0 +1,26 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package syscallx + +import "syscall" + +// Readlink returns the destination of the named symbolic link. +func Readlink(path string, buf []byte) (n int, err error) { + return syscall.Readlink(path, buf) +} diff --git a/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go b/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go new file mode 100644 index 000000000..2ba814990 --- /dev/null +++ b/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go @@ -0,0 +1,112 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package syscallx + +import ( + "syscall" + "unsafe" +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + + // GenericReparseBuffer + reparseBuffer byte +} + +type mountPointReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + PathBuffer [1]uint16 +} + +type symbolicLinkReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + Flags uint32 + PathBuffer [1]uint16 +} + +const ( + _IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + _SYMLINK_FLAG_RELATIVE = 1 +) + +// Readlink returns the destination of the named symbolic link. +func Readlink(path string, buf []byte) (n int, err error) { + fd, err := syscall.CreateFile(syscall.StringToUTF16Ptr(path), syscall.GENERIC_READ, 0, nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return -1, err + } + defer syscall.CloseHandle(fd) + + rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE) + var bytesReturned uint32 + err = syscall.DeviceIoControl(fd, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) + if err != nil { + return -1, err + } + + rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) + var s string + switch rdb.ReparseTag { + case syscall.IO_REPARSE_TAG_SYMLINK: + data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2]) + if data.Flags&_SYMLINK_FLAG_RELATIVE == 0 { + if len(s) >= 4 && s[:4] == `\??\` { + s = s[4:] + switch { + case len(s) >= 2 && s[1] == ':': // \??\C:\foo\bar + // do nothing + case len(s) >= 4 && s[:4] == `UNC\`: // \??\UNC\foo\bar + s = `\\` + s[4:] + default: + // unexpected; do nothing + } + } else { + // unexpected; do nothing + } + } + case _IO_REPARSE_TAG_MOUNT_POINT: + data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2]) + if len(s) >= 4 && s[:4] == `\??\` { // \??\C:\foo\bar + if len(s) < 48 || s[:11] != `\??\Volume{` { + s = s[4:] + } + } else { + // unexpected; do nothing + } + default: + // the path is not a symlink or junction but another type of reparse + // point + return -1, syscall.ENOENT + } + n = copy(buf, []byte(s)) + + return n, nil +} diff --git a/vendor/github.com/containerd/continuity/sysx/README.md b/vendor/github.com/containerd/continuity/sysx/README.md new file mode 100644 index 000000000..ad7aee533 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/README.md @@ -0,0 +1,3 @@ +This package is for internal use only. It is intended to only have +temporary changes before they are upstreamed to golang.org/x/sys/ +(a.k.a. https://github.com/golang/sys). diff --git a/vendor/github.com/containerd/continuity/sysx/file_posix.go b/vendor/github.com/containerd/continuity/sysx/file_posix.go new file mode 100644 index 000000000..e28f3a1b5 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/file_posix.go @@ -0,0 +1,128 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "os" + "path/filepath" + + "github.com/containerd/continuity/syscallx" +) + +// Readlink returns the destination of the named symbolic link. +// If there is an error, it will be of type *PathError. +func Readlink(name string) (string, error) { + for len := 128; ; len *= 2 { + b := make([]byte, len) + n, e := fixCount(syscallx.Readlink(fixLongPath(name), b)) + if e != nil { + return "", &os.PathError{Op: "readlink", Path: name, Err: e} + } + if n < len { + return string(b[0:n]), nil + } + } +} + +// Many functions in package syscall return a count of -1 instead of 0. +// Using fixCount(call()) instead of call() corrects the count. +func fixCount(n int, err error) (int, error) { + if n < 0 { + n = 0 + } + return n, err +} + +// fixLongPath returns the extended-length (\\?\-prefixed) form of +// path when needed, in order to avoid the default 260 character file +// path limit imposed by Windows. If path is not easily converted to +// the extended-length form (for example, if path is a relative path +// or contains .. elements), or is short enough, fixLongPath returns +// path unmodified. +// +// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
func fixLongPath(path string) string { + // Do nothing (and don't allocate) if the path is "short". + // Empirically (at least on the Windows Server 2013 builder), + // the kernel is arbitrarily okay with < 248 bytes. That + // matches what the docs above say: + // "When using an API to create a directory, the specified + // path cannot be so long that you cannot append an 8.3 file + // name (that is, the directory name cannot exceed MAX_PATH + // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248. + // + // The MSDN docs appear to say that a normal path that is 248 bytes long + // will work; empirically the path must be less than 248 bytes long. + if len(path) < 248 { + // Don't fix. (This is how Go 1.7 and earlier worked, + // not automatically generating the \\?\ form) + return path + } + + // The extended form begins with \\?\, as in + // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt. + // The extended form disables evaluation of . and .. path + // elements and disables the interpretation of / as equivalent + // to \. The conversion here rewrites / to \ and elides + // . elements as well as trailing or duplicate separators. For + // simplicity it avoids the conversion entirely for relative + // paths or paths containing .. elements. For now, + // \\server\share paths are not converted to + // \\?\UNC\server\share paths because the rules for doing so + // are less well-specified. + if len(path) >= 2 && path[:2] == `\\` { + // Don't canonicalize UNC paths.
+ return path + } + if !filepath.IsAbs(path) { + // Relative path + return path + } + + const prefix = `\\?` + + pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) + copy(pathbuf, prefix) + n := len(path) + r, w := 0, len(prefix) + for r < n { + switch { + case os.IsPathSeparator(path[r]): + // empty block + r++ + case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])): + // /./ + r++ + case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])): + // /../ is currently unhandled + return path + default: + pathbuf[w] = '\\' + w++ + for ; r < n && !os.IsPathSeparator(path[r]); r++ { + pathbuf[w] = path[r] + w++ + } + } + } + // A drive's root directory needs a trailing \ + if w == len(`\\?\c:`) { + pathbuf[w] = '\\' + w++ + } + return string(pathbuf[:w]) +} diff --git a/vendor/github.com/containerd/continuity/sysx/generate.sh b/vendor/github.com/containerd/continuity/sysx/generate.sh new file mode 100644 index 000000000..87d708d7a --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/generate.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +mksyscall="$(go env GOROOT)/src/syscall/mksyscall.pl" + +fix() { + sed 's,^package syscall$,package sysx,' \ + | sed 's,^import "unsafe"$,import (\n\t"syscall"\n\t"unsafe"\n),' \ + | gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \ + | gofmt -r='Syscall6 -> syscall.Syscall6' \ + | gofmt -r='Syscall -> syscall.Syscall' \ + | gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \ + | gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \ + | gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \ + | gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \ + | gofmt -r='SYS_LGETXATTR -> syscall.SYS_LGETXATTR' \ + | gofmt -r='SYS_LLISTXATTR -> syscall.SYS_LLISTXATTR' \ + | gofmt -r='SYS_LSETXATTR -> syscall.SYS_LSETXATTR' \ + | gofmt -r='SYS_LREMOVEXATTR -> syscall.SYS_LREMOVEXATTR' +} + +if [ "$GOARCH" == "" ] || [ "$GOOS" == "" ]; then + echo "Must specify \$GOARCH and \$GOOS" + exit 1 +fi + +mkargs="" + +if [ "$GOARCH" == "386" ] || [ "$GOARCH" == "arm" ]; then + mkargs="-l32" +fi + +for f in "$@"; do + $mksyscall $mkargs "${f}_${GOOS}.go" | fix > "${f}_${GOOS}_${GOARCH}.go" +done + diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_linux.go b/vendor/github.com/containerd/continuity/sysx/nodata_linux.go new file mode 100644 index 000000000..28ce5d8de --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/nodata_linux.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "syscall" +) + +const ENODATA = syscall.ENODATA diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go new file mode 100644 index 000000000..e0575f446 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go @@ -0,0 +1,24 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "syscall" +) + +// This should actually be a set that contains ENOENT and EPERM +const ENODATA = syscall.ENOENT diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_unix.go b/vendor/github.com/containerd/continuity/sysx/nodata_unix.go new file mode 100644 index 000000000..b26f5b3d0 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/nodata_unix.go @@ -0,0 +1,25 @@ +// +build darwin freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "syscall" +) + +const ENODATA = syscall.ENOATTR diff --git a/vendor/github.com/containerd/continuity/sysx/xattr.go b/vendor/github.com/containerd/continuity/sysx/xattr.go new file mode 100644 index 000000000..9e4326dcf --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/xattr.go @@ -0,0 +1,125 @@ +// +build linux darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package sysx + +import ( + "bytes" + "syscall" + + "golang.org/x/sys/unix" +) + +// Listxattr calls syscall listxattr and reads all content +// and returns a string array +func Listxattr(path string) ([]string, error) { + return listxattrAll(path, unix.Listxattr) +} + +// Removexattr calls syscall removexattr +func Removexattr(path string, attr string) (err error) { + return unix.Removexattr(path, attr) +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return unix.Setxattr(path, attr, data, flags) +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, unix.Getxattr) +} + +// LListxattr lists xattrs, not following symlinks +func LListxattr(path string) ([]string, error) { + return listxattrAll(path, unix.Llistxattr) +} + +// LRemovexattr removes an xattr, not following symlinks +func LRemovexattr(path string, attr string) (err error) { + return unix.Lremovexattr(path, attr) +} + +// LSetxattr sets an xattr, not following symlinks +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return unix.Lsetxattr(path, attr, data, flags) +} + +// LGetxattr gets an xattr, not following symlinks +func LGetxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, unix.Lgetxattr) +} + +const defaultXattrBufferSize = 5 + +type listxattrFunc func(path string, dest []byte) (int, error) + +func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) { + var p []byte // nil on first execution + + for { + n, err := listFunc(path, p) // first call gets buffer size. + if err != nil { + return nil, err + } + + if n > len(p) { + p = make([]byte, n) + continue + } + + p = p[:n] + + ps := bytes.Split(bytes.TrimSuffix(p, []byte{0}), []byte{0}) + var entries []string + for _, p := range ps { + s := string(p) + if s != "" { + entries = append(entries, s) + } + } + + return entries, nil + } +} + +type getxattrFunc func(string, string, []byte) (int, error) + +func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) { + p := make([]byte, defaultXattrBufferSize) + for { + n, err := getFunc(path, attr, p) + if err != nil { + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ERANGE { + p = make([]byte, len(p)*2) // this can't be ideal. + continue // try again! + } + + return nil, err + } + + // realloc to correct size and repeat + if n > len(p) { + p = make([]byte, n) + continue + } + + return p[:n], nil + } +} diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go new file mode 100644 index 000000000..c9ef3a1d2 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go @@ -0,0 +1,67 @@ +// +build !linux,!darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package sysx + +import ( + "errors" + "runtime" +) + +var unsupported = errors.New("extended attributes unsupported on " + runtime.GOOS) + +// Listxattr calls syscall listxattr and reads all content +// and returns a string array +func Listxattr(path string) ([]string, error) { + return []string{}, nil +} + +// Removexattr calls syscall removexattr +func Removexattr(path string, attr string) (err error) { + return unsupported +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return unsupported +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return []byte{}, unsupported +} + +// LListxattr lists xattrs, not following symlinks +func LListxattr(path string) ([]string, error) { + return []string{}, nil +} + +// LRemovexattr removes an xattr, not following symlinks +func LRemovexattr(path string, attr string) (err error) { + return unsupported +} + +// LSetxattr sets an xattr, not following symlinks +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return unsupported +} + +// LGetxattr gets an xattr, not following symlinks +func LGetxattr(path, attr string) ([]byte, error) { + return []byte{}, nil +} diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 0db48932c..c75a055a6 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,32 @@ # Changelog +## v1.11.3 (2019-10-04) + Update c/image to v4.0.1 + Bump github.com/spf13/pflag from 1.0.3 to 1.0.5 + Fix --build-args handling + Bump github.com/spf13/cobra from 0.0.3 to 0.0.5 + Bump github.com/cyphar/filepath-securejoin from 0.2.1 to 0.2.2 + Bump github.com/onsi/ginkgo from 1.8.0 to 1.10.1 + Bump github.com/fsouza/go-dockerclient from 1.3.0 to 1.4.4 + Add support for retrieving context from stdin "-" + Ensure bud remote context cleans up on error + info: add cgroups2 + Bump github.com/seccomp/libseccomp-golang from 0.9.0 to 0.9.1 + Bump github.com/mattn/go-shellwords from 1.0.5 to 1.0.6 + Bump github.com/stretchr/testify from 1.3.0 to 1.4.0 + Bump github.com/opencontainers/selinux from 1.2.2 to 1.3.0 + Bump github.com/etcd-io/bbolt from 1.3.2 to 1.3.3 + Bump github.com/onsi/gomega from 1.5.0 to 1.7.0 + update c/storage to v1.13.4 + Print build 'STEP' line to stdout, not stderr + Fix travis-ci on forks + Vendor c/storage v1.13.3 + Use Containerfile by default + Added tutorial on how to include Buildah as library + util/util: Fix "configuraitno" -> "configuration" log typo + Bump back to v1.12.0-dev + ## v1.11.2 (2019-09-13) Add some cleanup code Move devices code to unit specific directory. 
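As background on the vendored sysx code above: extended-attribute syscalls report the buffer size the caller would need, so listxattrAll and getxattrAll both loop, growing their buffer on ERANGE or on a reported size larger than what was passed in, until the value fits. Below is a minimal standalone sketch of that retry pattern, assuming Linux and golang.org/x/sys/unix; the name getxattrRetry, the starting buffer size, and the demo path are illustrative only and are not part of this patch.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// getxattrRetry mirrors the vendored getxattrAll loop: call with a small
// buffer, double it when the kernel answers ERANGE, and call again when
// the reported size exceeds the buffer that was passed in.
func getxattrRetry(path, attr string) ([]byte, error) {
	buf := make([]byte, 8) // deliberately small so the retry path is exercised
	for {
		n, err := unix.Getxattr(path, attr, buf)
		if err == unix.ERANGE {
			buf = make([]byte, len(buf)*2) // value did not fit; grow and retry
			continue
		}
		if err != nil {
			return nil, err
		}
		if n > len(buf) {
			buf = make([]byte, n) // attribute grew between calls; retry at the reported size
			continue
		}
		return buf[:n], nil
	}
}

func main() {
	// Assumes the attribute was set beforehand, e.g.:
	//   setfattr -n user.example -v hello /tmp/xattr-demo
	value, err := getxattrRetry("/tmp/xattr-demo", "user.example")
	fmt.Printf("value=%q err=%v\n", value, err)
}

The vendored helpers factor the same loop over a function parameter (getxattrFunc/listxattrFunc) so that the plain and the symlink-preserving L* variants share one implementation.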
diff --git a/vendor/github.com/containers/buildah/OWNERS b/vendor/github.com/containers/buildah/OWNERS new file mode 100644 index 000000000..5e6cbfdfa --- /dev/null +++ b/vendor/github.com/containers/buildah/OWNERS @@ -0,0 +1,22 @@ +approvers: + - TomSweeneyRedHat + - cevich + - giuseppe + - nalind + - rhatdan + - vrothberg +reviewers: + - QiWang19 + - TomSweeneyRedHat + - baude + - cevich + - edsantiago + - giuseppe + - haircommander + - jwhonce + - mheon + - mrunalp + - nalind + - rhatdan + - umohnani8 + - vrothberg diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 6f974ba86..59b62925a 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -12,7 +12,7 @@ import ( "github.com/containers/buildah/docker" "github.com/containers/buildah/util" - "github.com/containers/image/v4/types" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/ioutils" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -27,7 +27,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.11.3" + Version = "1.11.4" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. // This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index 359ff5227..6e98e5405 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,3 +1,25 @@ +- Changelog for v1.11.4 (2019-10-28) + * buildah: add a "manifest" command + * manifests: add the module + * pkg/supplemented: add a package for grouping images together + * pkg/manifests: add a manifest list build/manipulation API + * Update for ErrUnauthorizedForCredentials API change in containers/image + * Update for manifest-lists API changes in containers/image + * version: also note the version of containers/image + * Move to containers/image v5.0.0 + * Enable --device directory as src device + * Fix git build with branch specified + * Bump github.com/openshift/imagebuilder from 1.1.0 to 1.1.1 + * Bump github.com/fsouza/go-dockerclient from 1.4.4 to 1.5.0 + * Add clarification to the Tutorial for new users + * Silence "using cache" to ensure -q is fully quiet + * Add OWNERS File to Buildah + * Bump github.com/containers/storage from 1.13.4 to 1.13.5 + * Move runtime flag to bud from common + * Commit: check for storage.ErrImageUnknown using errors.Cause() + * Fix crash when invalid COPY --from flag is specified. 
+ * Bump back to v1.12.0-dev + - Changelog for v1.11.3 (2019-10-04) * Update c/image to v4.0.1 * Bump github.com/spf13/pflag from 1.0.3 to 1.0.5 diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index f46609525..4df3b9908 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -12,14 +12,14 @@ import ( "github.com/containers/buildah/pkg/blobcache" "github.com/containers/buildah/util" - cp "github.com/containers/image/v4/copy" - "github.com/containers/image/v4/docker" - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/signature" - is "github.com/containers/image/v4/storage" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/stringid" @@ -96,7 +96,7 @@ type PushOptions struct { // github.com/containers/image/types SystemContext to hold credentials // and other authentication/authorization information. SystemContext *types.SystemContext - // ManifestType is the format to use when saving the imge using the 'dir' transport + // ManifestType is the format to use when saving the image using the 'dir' transport // possible options are oci, v2s1, and v2s2 ManifestType string // BlobDirectory is the name of a directory in which we'll look for @@ -309,7 +309,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options } img, err := is.Transport.GetStoreImage(b.store, dest) - if err != nil && err != storage.ErrImageUnknown { + if err != nil && errors.Cause(err) != storage.ErrImageUnknown { return imgID, nil, "", errors.Wrapf(err, "error locating image %q in local storage", transports.ImageName(dest)) } if err == nil { diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go index a8b29231d..d2e9dc732 100644 --- a/vendor/github.com/containers/buildah/common.go +++ b/vendor/github.com/containers/buildah/common.go @@ -6,8 +6,8 @@ import ( "path/filepath" "github.com/containers/buildah/pkg/unshare" - cp "github.com/containers/image/v4/copy" - "github.com/containers/image/v4/types" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/types" "github.com/containers/storage" ) diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go index 49b1930c5..617619e45 100644 --- a/vendor/github.com/containers/buildah/config.go +++ b/vendor/github.com/containers/buildah/config.go @@ -8,9 +8,9 @@ import ( "time" "github.com/containers/buildah/docker" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/stringid" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" diff --git 
a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go index 4b62e0e31..561287ac2 100644 --- a/vendor/github.com/containers/buildah/docker/types.go +++ b/vendor/github.com/containers/buildah/docker/types.go @@ -7,7 +7,7 @@ package docker import ( "time" - "github.com/containers/image/v4/pkg/strslice" + "github.com/containers/image/v5/pkg/strslice" digest "github.com/opencontainers/go-digest" ) diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod index 0bd592d48..6bba4a1f8 100644 --- a/vendor/github.com/containers/buildah/go.mod +++ b/vendor/github.com/containers/buildah/go.mod @@ -5,8 +5,8 @@ go 1.12 require ( github.com/blang/semver v3.5.0+incompatible // indirect github.com/containernetworking/cni v0.7.1 - github.com/containers/image/v4 v4.0.1 - github.com/containers/storage v1.13.4 + github.com/containers/image/v5 v5.0.0 + github.com/containers/storage v1.13.5 github.com/cyphar/filepath-securejoin v0.2.2 github.com/docker/distribution v2.7.1+incompatible github.com/docker/docker-credential-helpers v0.6.1 // indirect @@ -14,7 +14,7 @@ require ( github.com/docker/go-units v0.4.0 github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 github.com/etcd-io/bbolt v1.3.3 - github.com/fsouza/go-dockerclient v1.4.4 + github.com/fsouza/go-dockerclient v1.5.0 github.com/ghodss/yaml v1.0.0 github.com/hashicorp/go-multierror v1.0.0 github.com/imdario/mergo v0.3.6 // indirect @@ -30,7 +30,7 @@ require ( github.com/opencontainers/runtime-tools v0.9.0 github.com/opencontainers/selinux v1.3.0 github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible - github.com/openshift/imagebuilder v1.1.0 + github.com/openshift/imagebuilder v1.1.1 github.com/pkg/errors v0.8.1 github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4 github.com/seccomp/libseccomp-golang v0.9.1 @@ -42,7 +42,7 @@ require ( github.com/vishvananda/netlink v1.0.0 // indirect github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f // indirect github.com/xeipuuv/gojsonschema v1.1.0 // indirect - golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 + golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 // indirect google.golang.org/grpc v1.24.0 // indirect diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum index 6ebb9f91f..e5ce6a290 100644 --- a/vendor/github.com/containers/buildah/go.sum +++ b/vendor/github.com/containers/buildah/go.sum @@ -28,21 +28,23 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/continuity v0.0.0-20180216233310-d8fb8589b0e8/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod 
h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containers/image v3.0.2+incompatible h1:B1lqAE8MUPCrsBLE86J0gnXleeRq8zJnQryhiiGQNyE= -github.com/containers/image v3.0.2+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= -github.com/containers/image v4.0.0+incompatible h1:CfKbemfowbIg3nhq8rvtI+sdU9QbvODkiD+JLpOJMiQ= -github.com/containers/image v4.0.0+incompatible/go.mod h1:Td6tqqQu0miIBO8mauyzsVqBbv5WhKSE4pH2ZwslVp0= github.com/containers/image/v4 v4.0.1 h1:idNGHChj0Pyv3vLrxul2oSVMZLeFqpoq3CjLeVgapSQ= github.com/containers/image/v4 v4.0.1/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA= +github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4= +github.com/containers/image/v5 v5.0.0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/storage v1.13.4 h1:j0bBaJDKbUHtAW1MXPFnwXJtqcH+foWeuXK1YaBV5GA= github.com/containers/storage v1.13.4/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA= +github.com/containers/storage v1.13.5 h1:/SUzGeOP2HDijpF7Yur21Ch6WTZC1BNeZF917CWcp5c= +github.com/containers/storage v1.13.5/go.mod h1:HELz8Sn+UVbPaUZMI8RvIG9doD4y4z6Gtg4k7xdd2ZY= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -60,6 +62,8 @@ github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5N github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b h1:+Ga+YpCDpcY1fln6GI0fiiirpqHGcob5/Vk3oKNuGdU= github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce h1:H3csZuxZESJeeEiOxq4YXPNmLFbjl7u2qVBrAAGX/sA= +github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.1 h1:Dq4iIfcM7cNtddhLVWe9h4QDjsi4OER3Z8voPu/I52g= github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= @@ -84,6 +88,8 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsouza/go-dockerclient v1.4.4 h1:Sd5nD4wdAgiPxvrbYUzT2ZZNmPk3z+GGnZ+frvw8z04= github.com/fsouza/go-dockerclient v1.4.4/go.mod h1:PrwszSL5fbmsESocROrOGq/NULMXRw+bajY0ltzD6MA= +github.com/fsouza/go-dockerclient v1.5.0 h1:7OtayOe5HnoG+KWMHgyyPymwaodnB2IDYuVfseKyxbA= +github.com/fsouza/go-dockerclient v1.5.0/go.mod h1:AqZZK/zFO3phxYxlTsAaeAMSdQ9mgHuhy+bjN034Qds= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 
github.com/ghodss/yaml v0.0.0-20161207003320-04f313413ffd/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= @@ -101,6 +107,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -190,16 +197,11 @@ github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lN github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c h1:xa+eQWKuJ9MbB9FBL/eoNvDFvveAkz2LQoz8PzX7Q/4= github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c/go.mod h1:GhAqVMEWnTcW2dxoD/SO3n2enrgWl3y6Dnx4m59GvcA= -github.com/mtrmac/image/v4 v4.0.0-20191001213151-121ffca6db69 h1:TVWS7od6UeGhdYqgXn/+EIDlulkGGV+r6FnjoxRJAl0= -github.com/mtrmac/image/v4 v4.0.0-20191001213151-121ffca6db69/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA= -github.com/mtrmac/image/v4 v4.0.0-20191002203927-a64d9d2717f4 h1:AE5cilZfrGtAgMg5Ed4c2Y2KczlOsMVZAK055sSq+gc= -github.com/mtrmac/image/v4 v4.0.0-20191002203927-a64d9d2717f4/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA= -github.com/mtrmac/image/v4 v4.0.0-20191003181245-f4c983e93262 h1:HMUEnWU3OPT09JRFQLn8VTp3GfdfiEhDMAEhkdX8QnA= -github.com/mtrmac/image/v4 v4.0.0-20191003181245-f4c983e93262/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= @@ -238,6 +240,8 @@ github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible h1:s5 github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= github.com/openshift/imagebuilder v1.1.0 h1:oT704SkwMEzmIMU/+Uv1Wmvt+p10q3v2WuYMeFI18c4= github.com/openshift/imagebuilder v1.1.0/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= +github.com/openshift/imagebuilder v1.1.1 h1:KAUR31p8UBJdfVO42azWgb+LeMAed2zaKQ19e0C0X2I= +github.com/openshift/imagebuilder v1.1.1/go.mod 
h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -299,8 +303,6 @@ github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= -github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE= @@ -329,6 +331,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90Pveol golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -390,6 +394,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index 829d8c21d..79c75ce0b 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -13,11 +13,11 @@ import ( "time" "github.com/containers/buildah/docker" - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/image" - "github.com/containers/image/v4/manifest" - is "github.com/containers/image/v4/storage" - "github.com/containers/image/v4/types" + "github.com/containers/image/v5/docker/reference" + 
"github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" @@ -596,7 +596,7 @@ func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest * return i.manifest, i.manifestType, nil } -func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { +func (i *containerImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { return nil, nil } diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index fe6cc266a..6b2c9c84c 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -13,8 +13,8 @@ import ( "strings" "github.com/containers/buildah" - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/opencontainers/runc/libcontainer/configs" diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index 136261bf0..27ec1bb23 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -12,11 +12,11 @@ import ( "github.com/containers/buildah" "github.com/containers/buildah/util" - "github.com/containers/image/v4/docker/reference" - is "github.com/containers/image/v4/storage" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/transports/alltransports" - "github.com/containers/image/v4/types" + "github.com/containers/image/v5/docker/reference" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" v1 "github.com/opencontainers/image-spec/specs-go/v1" diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index 114d250a4..fad2bfe95 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -13,12 +13,12 @@ import ( "github.com/containers/buildah" buildahdocker "github.com/containers/buildah/docker" "github.com/containers/buildah/util" - cp "github.com/containers/image/v4/copy" - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/manifest" - is "github.com/containers/image/v4/storage" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" securejoin "github.com/cyphar/filepath-securejoin" @@ -759,6 
+759,12 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b s.executor.log(commitMessage) } } + logCacheHit := func(cacheID string) { + if !s.executor.quiet { + cacheHitMessage := "--> Using cache" + fmt.Fprintf(s.executor.out, "%s %s\n", cacheHitMessage, cacheID) + } + } logImageID := func(imgID string) { if s.executor.iidfile == "" { fmt.Fprintf(s.executor.out, "%s\n", imgID) @@ -816,6 +822,9 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b if strings.Contains(n, "--from") && (command == "COPY" || command == "ADD") { var mountPoint string arr := strings.Split(n, "=") + if len(arr) != 2 { + return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=", command) + } otherStage, ok := s.executor.stages[arr[1]] if !ok { if mountPoint, err = s.getImageRootfs(ctx, stage, arr[1]); err != nil { @@ -906,7 +915,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b } if cacheID != "" { // Note the cache hit. - fmt.Fprintf(s.executor.out, "--> Using cache %s\n", cacheID) + logCacheHit(cacheID) } else { // We're not going to find any more cache hits. checkForLayers = false diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go index 7a94d9974..520b92e3f 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/util.go +++ b/vendor/github.com/containers/buildah/imagebuildah/util.go @@ -23,8 +23,15 @@ func cloneToDirectory(url, dir string) error { if !strings.HasPrefix(url, "git://") && !strings.HasSuffix(url, ".git") { url = "git://" + url } - logrus.Debugf("cloning %q to %q", url, dir) - cmd := exec.Command("git", "clone", url, dir) + gitBranch := strings.Split(url, "#") + var cmd *exec.Cmd + if len(gitBranch) < 2 { + logrus.Debugf("cloning %q to %q", url, dir) + cmd = exec.Command("git", "clone", url, dir) + } else { + logrus.Debugf("cloning repo %q and branch %q to %q", gitBranch[0], gitBranch[1], dir) + cmd = exec.Command("git", "clone", "-b", gitBranch[1], gitBranch[0], dir) + } return cmd.Run() } diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go index 4d3059527..751ce6ae1 100644 --- a/vendor/github.com/containers/buildah/import.go +++ b/vendor/github.com/containers/buildah/import.go @@ -5,9 +5,9 @@ import ( "github.com/containers/buildah/docker" "github.com/containers/buildah/util" - "github.com/containers/image/v4/manifest" - is "github.com/containers/image/v4/storage" - "github.com/containers/image/v4/types" + "github.com/containers/image/v5/manifest" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/types" "github.com/containers/storage" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go index 216a96611..87cfd5d0d 100644 --- a/vendor/github.com/containers/buildah/new.go +++ b/vendor/github.com/containers/buildah/new.go @@ -7,12 +7,12 @@ import ( "strings" "github.com/containers/buildah/util" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/pkg/sysregistriesv2" - is "github.com/containers/image/v4/storage" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/transports/alltransports" - "github.com/containers/image/v4/types" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/sysregistriesv2" + is 
"github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/openshift/imagebuilder" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go index 539c894a3..b7f704615 100644 --- a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go +++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go @@ -10,11 +10,11 @@ import ( "sync" "github.com/containers/buildah/docker" - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/image" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" @@ -263,14 +263,14 @@ func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *dig return s.source.GetSignatures(ctx, instanceDigest) } -func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - signatures, err := s.source.GetSignatures(ctx, nil) +func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + signatures, err := s.source.GetSignatures(ctx, instanceDigest) if err != nil { return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference)) } canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0) - infos, err := s.source.LayerInfosForCopy(ctx) + infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest) if err != nil { return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference)) } @@ -515,7 +515,7 @@ func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.Bl return false, types.BlobInfo{}, nil } -func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte) error { +func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error { manifestDigest, err := manifest.Digest(manifestBytes) if err != nil { logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err) @@ -525,13 +525,13 @@ func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes [] logrus.Warnf("error saving manifest as %q: %v", filename, err) } } - return d.destination.PutManifest(ctx, manifestBytes) + return d.destination.PutManifest(ctx, manifestBytes, instanceDigest) } -func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { - return d.destination.PutSignatures(ctx, signatures) +func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + return d.destination.PutSignatures(ctx, signatures, instanceDigest) } -func (d *blobCacheDestination) Commit(ctx context.Context) error { - return d.destination.Commit(ctx) +func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel 
types.UnparsedImage) error { + return d.destination.Commit(ctx, unparsedToplevel) } diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go index 06aec96ea..a9bf94a32 100644 --- a/vendor/github.com/containers/buildah/pkg/cli/common.go +++ b/vendor/github.com/containers/buildah/pkg/cli/common.go @@ -8,6 +8,7 @@ import ( "fmt" "os" "path/filepath" + "runtime" "strings" "github.com/containers/buildah" @@ -95,6 +96,8 @@ type FromAndBudResults struct { Isolation string Memory string MemorySwap string + OverrideArch string + OverrideOS string SecurityOpt []string ShmSize string Ulimit []string @@ -161,7 +164,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet { fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image, even if a version is present") fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress") fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build") - fs.StringVar(&flags.Runtime, "runtime", util.Runtime(), "`path` to an alternate runtime. Use BUILDAH_RUNTIME environment variable to override.") + // "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/bud.go. fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime") fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)") fs.BoolVar(&flags.Squash, "squash", false, "Squash newly built layers into a single new layer.") @@ -194,6 +197,14 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.") fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: [], where unit = b, k, m or g)") fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap") + fs.StringVar(&flags.OverrideOS, "override-os", runtime.GOOS, "prefer `OS` instead of the running OS when pulling images") + if err := fs.MarkHidden("override-os"); err != nil { + panic(fmt.Sprintf("error marking override-os as hidden: %v", err)) + } + fs.StringVar(&flags.OverrideArch, "override-arch", runtime.GOARCH, "prefer `ARCH` instead of the architecture of the machine when pulling images") + if err := fs.MarkHidden("override-arch"); err != nil { + panic(fmt.Sprintf("error marking override-arch as hidden: %v", err)) + } fs.StringArrayVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])") fs.StringVar(&flags.ShmSize, "shm-size", "65536k", "size of '/dev/shm'. 
The format is `<number><unit>`.") fs.StringSliceVar(&flags.Ulimit, "ulimit", []string{}, "ulimit options (default [])") diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index f0996315a..9194ddf58 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -14,7 +14,7 @@ import ( "unicode" "github.com/containers/buildah" - "github.com/containers/image/v4/types" + "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/idtools" units "github.com/docker/go-units" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -583,6 +583,12 @@ func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) { ctx.RegistriesDirPath = regConfDir } ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", buildah.Version) + if os, err := c.Flags().GetString("override-os"); err == nil { + ctx.OSChoice = os + } + if arch, err := c.Flags().GetString("override-arch"); err == nil { + ctx.ArchitectureChoice = arch + } return ctx, nil } diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go index 238293894..1aaeca278 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go @@ -4,6 +4,8 @@ package parse import ( "fmt" + "os" + "path/filepath" "github.com/containers/buildah/pkg/unshare" "github.com/opencontainers/runc/libcontainer/configs" @@ -24,18 +26,40 @@ func getDefaultProcessLimits() []string { return defaultLimits } -func DeviceFromPath(device string) (configs.Device, error) { +func DeviceFromPath(device string) ([]configs.Device, error) { + var devs []configs.Device src, dst, permissions, err := Device(device) if err != nil { - return configs.Device{}, err + return nil, err } if unshare.IsRootless() { - return configs.Device{}, errors.Errorf("Renaming device %s to %s is not a supported in rootless containers", src, dst) + return nil, errors.Errorf("Renaming device %s to %s is not a supported in rootless containers", src, dst) } - dev, err := devices.DeviceFromPath(src, permissions) + srcInfo, err := os.Stat(src) if err != nil { - return configs.Device{}, errors.Wrapf(err, "%s is not a valid device", src) + return nil, errors.Wrapf(err, "error getting info of source device %s", src) } - dev.Path = dst - return *dev, nil + + if !srcInfo.IsDir() { + + dev, err := devices.DeviceFromPath(src, permissions) + if err != nil { + return nil, errors.Wrapf(err, "%s is not a valid device", src) + } + dev.Path = dst + devs = append(devs, *dev) + return devs, nil + } + + // If source device is a directory + srcDevices, err := devices.GetDevices(src) + if err != nil { + return nil, errors.Wrapf(err, "error getting source devices from directory %s", src) + } + for _, d := range srcDevices { + d.Path = filepath.Join(dst, filepath.Base(d.Path)) + d.Permissions = permissions + devs = append(devs, *d) + } + return devs, nil } diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go index 60dc3693f..300f3b396 100644 --- a/vendor/github.com/containers/buildah/pull.go +++ b/vendor/github.com/containers/buildah/pull.go @@ -8,18 +8,18 @@ import ( "github.com/containers/buildah/pkg/blobcache" "github.com/containers/buildah/util" - cp "github.com/containers/image/v4/copy" - "github.com/containers/image/v4/directory" -
"github.com/containers/image/v4/docker" - dockerarchive "github.com/containers/image/v4/docker/archive" - "github.com/containers/image/v4/docker/reference" - tarfile "github.com/containers/image/v4/docker/tarfile" - ociarchive "github.com/containers/image/v4/oci/archive" - oci "github.com/containers/image/v4/oci/layout" - "github.com/containers/image/v4/signature" - is "github.com/containers/image/v4/storage" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/directory" + "github.com/containers/image/v5/docker" + dockerarchive "github.com/containers/image/v5/docker/archive" + "github.com/containers/image/v5/docker/reference" + tarfile "github.com/containers/image/v5/docker/tarfile" + ociarchive "github.com/containers/image/v5/oci/archive" + oci "github.com/containers/image/v5/oci/layout" + "github.com/containers/image/v5/signature" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage" multierror "github.com/hashicorp/go-multierror" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index 06492fad2..44db18b45 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -8,9 +8,9 @@ import ( "path/filepath" "github.com/containers/buildah/util" - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/pkg/sysregistriesv2" - "github.com/containers/image/v4/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index a572d1405..d5e842315 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -10,12 +10,12 @@ import ( "syscall" "github.com/containers/buildah/pkg/cgroups" - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/pkg/sysregistriesv2" - "github.com/containers/image/v4/signature" - is "github.com/containers/image/v4/storage" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/signature" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/docker/distribution/registry/api/errcode" specs "github.com/opencontainers/runtime-spec/specs-go" diff --git a/vendor/github.com/containers/image/v4/LICENSE b/vendor/github.com/containers/image/v4/LICENSE deleted file mode 100644 index 953563530..000000000 --- a/vendor/github.com/containers/image/v4/LICENSE +++ /dev/null @@ -1,189 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
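The blobcache hunks earlier in this patch show the shape of the containers/image v5 interface change this bump tracks: the manifest- and signature-related methods gain an optional instance digest that selects one member of a manifest list, and Commit now receives the top-level unparsed image. A minimal forwarding wrapper against those v5 signatures might look like this (an illustrative sketch, not part of the vendored code; the package and type names are hypothetical):

package blobwrap

import (
	"context"

	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// forwardingDestination is a hypothetical pass-through wrapper in the style
// of blobCacheDestination above; the embedded interface supplies every
// method we do not override.
type forwardingDestination struct {
	types.ImageDestination
}

// PutManifest forwards the manifest bytes; instanceDigest is nil for the
// top-level manifest, or identifies a single instance in a manifest list.
func (d *forwardingDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
	return d.ImageDestination.PutManifest(ctx, m, instanceDigest)
}

// Commit, unlike in v4, receives the top-level unparsed image.
func (d *forwardingDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
	return d.ImageDestination.Commit(ctx, unparsedToplevel)
}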
diff --git a/vendor/github.com/containers/image/v4/copy/copy.go b/vendor/github.com/containers/image/v4/copy/copy.go deleted file mode 100644 index 30d8a4464..000000000 --- a/vendor/github.com/containers/image/v4/copy/copy.go +++ /dev/null @@ -1,975 +0,0 @@ -package copy - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "reflect" - "runtime" - "strings" - "sync" - "time" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/image" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/pkg/blobinfocache" - "github.com/containers/image/v4/pkg/compression" - "github.com/containers/image/v4/signature" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/vbauerster/mpb" - "github.com/vbauerster/mpb/decor" - "golang.org/x/crypto/ssh/terminal" - "golang.org/x/sync/semaphore" -) - -type digestingReader struct { - source io.Reader - digester digest.Digester - expectedDigest digest.Digest - validationFailed bool - validationSucceeded bool -} - -// maxParallelDownloads is used to limit the maxmimum number of parallel -// downloads. Let's follow Firefox by limiting it to 6. -var maxParallelDownloads = 6 - -// compressionBufferSize is the buffer size used to compress a blob -var compressionBufferSize = 1048576 - -// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error -// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. -// (neither is set if EOF is never reached). -func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { - if err := expectedDigest.Validate(); err != nil { - return nil, errors.Errorf("Invalid digest specification %s", expectedDigest) - } - digestAlgorithm := expectedDigest.Algorithm() - if !digestAlgorithm.Available() { - return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) - } - return &digestingReader{ - source: source, - digester: digestAlgorithm.Digester(), - expectedDigest: expectedDigest, - validationFailed: false, - }, nil -} - -func (d *digestingReader) Read(p []byte) (int, error) { - n, err := d.source.Read(p) - if n > 0 { - if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil { - // Coverage: This should not happen, the hash.Hash interface requires - // d.digest.Write to never return an error, and the io.Writer interface - // requires n2 == len(input) if no error is returned. - return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n) - } - } - if err == io.EOF { - actualDigest := d.digester.Digest() - if actualDigest != d.expectedDigest { - d.validationFailed = true - return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) - } - d.validationSucceeded = true - } - return n, err -} - -// copier allows us to keep track of diffID values for blobs, and other -// data shared across one or more images in a possible manifest list. 
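The digestingReader deleted above hashes the stream while it is read and flags a mismatch at EOF. The public go-digest Verifier captures the same verify-while-reading idea in miniature (an illustrative sketch, not from this patch):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	payload := "hello"
	expected := digest.FromString(payload) // the digest we expect the stream to have
	verifier := expected.Verifier()        // an io.Writer that hashes what it sees
	// Tee the stream through the verifier while the consumer reads it.
	r := io.TeeReader(strings.NewReader(payload), verifier)
	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		panic(err)
	}
	fmt.Println(verifier.Verified()) // true only if the bytes matched expected
}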
-type copier struct { - dest types.ImageDestination - rawSource types.ImageSource - reportWriter io.Writer - progressOutput io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties - blobInfoCache types.BlobInfoCache - copyInParallel bool - compressionFormat compression.Algorithm - compressionLevel *int -} - -// imageCopier tracks state specific to a single image (possibly an item of a manifest list) -type imageCopier struct { - c *copier - manifestUpdates *types.ManifestUpdateOptions - src types.Image - diffIDsAreNeeded bool - canModifyManifest bool - canSubstituteBlobs bool -} - -// Options allows supplying non-default configuration modifying the behavior of CopyImage. -type Options struct { - RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. - SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), - ReportWriter io.Writer - SourceCtx *types.SystemContext - DestinationCtx *types.SystemContext - ProgressInterval time.Duration // time to wait between reports to signal the progress channel - Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. - // manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type - ForceManifestMIMEType string -} - -// Image copies image from srcRef to destRef, using policyContext to validate -// source image admissibility. It returns the manifest which was written to -// the new copy of the image. -func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (manifest []byte, retErr error) { - // NOTE this function uses an output parameter for the error return value. - // Setting this and returning is the ideal way to return an error. - // - // the defers in this routine will wrap the error return with its own errors - // which can be valuable context in the middle of a multi-streamed copy. - if options == nil { - options = &Options{} - } - - reportWriter := ioutil.Discard - - if options.ReportWriter != nil { - reportWriter = options.ReportWriter - } - - dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) - if err != nil { - return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef)) - } - defer func() { - if err := dest.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (dest: %v)", err) - } - }() - - rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) - if err != nil { - return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef)) - } - defer func() { - if err := rawSource.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (src: %v)", err) - } - }() - - // If reportWriter is not a TTY (e.g., when piping to a file), do not - // print the progress bars to avoid long and hard to parse output. - // createProgressBar() will print a single line instead. - progressOutput := reportWriter - if !isTTY(reportWriter) { - progressOutput = ioutil.Discard - } - copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() - - c := &copier{ - dest: dest, - rawSource: rawSource, - reportWriter: reportWriter, - progressOutput: progressOutput, - progressInterval: options.ProgressInterval, - progress: options.Progress, - copyInParallel: copyInParallel, - // FIXME? 
The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. - // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually - // we might want to add a separate CommonCtx — or would that be too confusing? - blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx), - } - // Default to using gzip compression unless specified otherwise. - if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil { - algo, err := compression.AlgorithmByName("gzip") - if err != nil { - return nil, err - } - c.compressionFormat = algo - } else { - c.compressionFormat = *options.DestinationCtx.CompressionFormat - } - if options.DestinationCtx != nil { - // Note that the compressionLevel can be nil. - c.compressionLevel = options.DestinationCtx.CompressionLevel - } - - unparsedToplevel := image.UnparsedInstance(rawSource, nil) - multiImage, err := isMultiImage(ctx, unparsedToplevel) - if err != nil { - return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef)) - } - - if !multiImage { - // The simple case: Just copy a single image. - if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil { - return nil, err - } - } else { - // This is a manifest list. Choose a single image and copy it. - // FIXME: Copy to destinations which support manifest lists, one image at a time. - instanceDigest, err := image.ChooseManifestInstanceFromManifestList(ctx, options.SourceCtx, unparsedToplevel) - if err != nil { - return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef)) - } - logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest) - unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) - - if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil { - return nil, err - } - } - - if err := c.dest.Commit(ctx); err != nil { - return nil, errors.Wrap(err, "Error committing the finished image") - } - - return manifest, nil -} - -// Image copies a single (on-manifest-list) image unparsedImage, using policyContext to validate -// source image admissibility. -func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifestBytes []byte, retErr error) { - // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. - // Make sure we fail cleanly in such cases. - multiImage, err := isMultiImage(ctx, unparsedImage) - if err != nil { - // FIXME FIXME: How to name a reference for the sub-image? - return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference())) - } - if multiImage { - return nil, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") - } - - // Please keep this policy check BEFORE reading any other information about the image. - // (the multiImage check above only matches the MIME type, which we have received anyway. - // Actual parsing of anything should be deferred.) - if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. 
- return nil, errors.Wrap(err, "Source image rejected") - } - src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage) - if err != nil { - return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference())) - } - - // If the destination is a digested reference, make a note of that, determine what digest value we're - // expecting, and check that the source manifest matches it. - destIsDigestedReference := false - if named := c.dest.Reference().DockerReference(); named != nil { - if digested, ok := named.(reference.Digested); ok { - destIsDigestedReference = true - sourceManifest, _, err := src.Manifest(ctx) - if err != nil { - return nil, errors.Wrapf(err, "Error reading manifest from source image") - } - matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest()) - if err != nil { - return nil, errors.Wrapf(err, "Error computing digest of source image's manifest") - } - if !matches { - return nil, errors.New("Digest of source image's manifest would not match destination reference") - } - } - } - - if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil { - return nil, err - } - - var sigs [][]byte - if options.RemoveSignatures { - sigs = [][]byte{} - } else { - c.Printf("Getting image source signatures\n") - s, err := src.Signatures(ctx) - if err != nil { - return nil, errors.Wrap(err, "Error reading signatures") - } - sigs = s - } - if len(sigs) != 0 { - c.Printf("Checking if image destination supports signatures\n") - if err := c.dest.SupportsSignatures(ctx); err != nil { - return nil, errors.Wrap(err, "Can not copy signatures") - } - } - - ic := imageCopier{ - c: c, - manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, - src: src, - // diffIDsAreNeeded is computed later - canModifyManifest: len(sigs) == 0 && !destIsDigestedReference, - } - // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. - // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: - // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. - // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk - // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, - // and we would reuse and sign it. - ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == "" - - if err := ic.updateEmbeddedDockerReference(); err != nil { - return nil, err - } - - // We compute preferredManifestMIMEType only to show it in error messages. - // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. - preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType) - if err != nil { - return nil, err - } - - // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. 
- ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) - - if err := ic.copyLayers(ctx); err != nil { - return nil, err - } - - // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; - // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support - // without actually trying to upload something and getting a types.ManifestTypeRejectedError. - // So, try the preferred manifest MIME type. If the process succeeds, fine… - manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx) - if err != nil { - logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) - // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options. - if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 { - // We don’t have other options. - // In principle the code below would handle this as well, but the resulting error message is fairly ugly. - // Don’t bother the user with MIME types if we have no choice. - return nil, err - } - // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. - // So if we are here, we will definitely be trying to convert the manifest. - // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, - // so let’s bail out early and with a better error message. - if !ic.canModifyManifest { - return nil, errors.Wrap(err, "Writing manifest failed (and converting it is not possible)") - } - - // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. - errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} - for _, manifestMIMEType := range otherManifestMIMETypeCandidates { - logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) - ic.manifestUpdates.ManifestMIMEType = manifestMIMEType - attemptedManifest, err := ic.copyUpdatedConfigAndManifest(ctx) - if err != nil { - logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) - errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) - continue - } - - // We have successfully uploaded a manifest. - manifestBytes = attemptedManifest - errs = nil // Mark this as a success so that we don't abort below. - break - } - if errs != nil { - return nil, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) - } - } - - if options.SignBy != "" { - newSig, err := c.createSignature(manifestBytes, options.SignBy) - if err != nil { - return nil, err - } - sigs = append(sigs, newSig) - } - - c.Printf("Storing signatures\n") - if err := c.dest.PutSignatures(ctx, sigs); err != nil { - return nil, errors.Wrap(err, "Error writing signatures") - } - - return manifestBytes, nil -} - -// Printf writes a formatted string to c.reportWriter. -// Note that the method name Printf is not entirely arbitrary: (go tool vet) -// has a built-in list of functions/methods (whatever object they are for) -// which have their format strings checked; for other names we would have -// to pass a parameter to every (go tool vet) invocation. -func (c *copier) Printf(format string, a ...interface{}) { - fmt.Fprintf(c.reportWriter, format, a...) 
-} - -func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error { - if dest.MustMatchRuntimeOS() { - wantedOS := runtime.GOOS - if sys != nil && sys.OSChoice != "" { - wantedOS = sys.OSChoice - } - c, err := src.OCIConfig(ctx) - if err != nil { - return errors.Wrapf(err, "Error parsing image configuration") - } - osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS) - if wantedOS == "windows" && c.OS == "linux" { - return osErr - } else if wantedOS != "windows" && c.OS == "windows" { - return osErr - } - } - return nil -} - -// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. -func (ic *imageCopier) updateEmbeddedDockerReference() error { - if ic.c.dest.IgnoresEmbeddedDockerReference() { - return nil // Destination would prefer us not to update the embedded reference. - } - destRef := ic.c.dest.Reference().DockerReference() - if destRef == nil { - return nil // Destination does not care about Docker references - } - if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { - return nil // No reference embedded in the manifest, or it matches destRef already. - } - - if !ic.canModifyManifest { - return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway", - transports.ImageName(ic.c.dest.Reference()), destRef.String()) - } - ic.manifestUpdates.EmbeddedDockerReference = destRef - return nil -} - -// isTTY returns true if the io.Writer is a file and a tty. -func isTTY(w io.Writer) bool { - if f, ok := w.(*os.File); ok { - return terminal.IsTerminal(int(f.Fd())) - } - return false -} - -// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. -func (ic *imageCopier) copyLayers(ctx context.Context) error { - srcInfos := ic.src.LayerInfos() - numLayers := len(srcInfos) - updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) - if err != nil { - return err - } - srcInfosUpdated := false - if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { - if !ic.canModifyManifest { - return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden") - } - srcInfos = updatedSrcInfos - srcInfosUpdated = true - } - - type copyLayerData struct { - destInfo types.BlobInfo - diffID digest.Digest - err error - } - - // copyGroup is used to determine if all layers are copied - copyGroup := sync.WaitGroup{} - copyGroup.Add(numLayers) - - // copySemaphore is used to limit the number of parallel downloads to - // avoid malicious images causing troubles and to be nice to servers. - var copySemaphore *semaphore.Weighted - if ic.c.copyInParallel { - copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads)) - } else { - copySemaphore = semaphore.NewWeighted(int64(1)) - } - - data := make([]copyLayerData, numLayers) - copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) { - defer copySemaphore.Release(1) - defer copyGroup.Done() - cld := copyLayerData{} - if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { - // DiffIDs are, currently, needed only when converting from schema1. - // In which case src.LayerInfos will not have URLs because schema1 - // does not support them. 
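The copySemaphore/copyGroup pairing in copyLayers above is a standard bounded-parallelism pattern: a weighted semaphore caps the number of in-flight goroutines while a WaitGroup waits for all of them to finish. Reduced to its essentials (a sketch under the golang.org/x/sync/semaphore API used here, with placeholder work):

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(6) // cf. maxParallelDownloads above
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		// Block until a slot is free; the vendored code does not check
		// this error, but it is non-nil if ctx is cancelled.
		if err := sem.Acquire(ctx, 1); err != nil {
			break
		}
		wg.Add(1)
		go func(i int) {
			defer sem.Release(1)
			defer wg.Done()
			fmt.Println("copying layer", i) // placeholder for copyLayerHelper
		}(i)
	}
	wg.Wait()
}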
- if ic.diffIDsAreNeeded { - cld.err = errors.New("getting DiffID for foreign layers is unimplemented") - } else { - cld.destInfo = srcLayer - logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) - } - } else { - cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool) - } - data[index] = cld - } - - func() { // A scope for defer - progressPool, progressCleanup := ic.c.newProgressPool(ctx) - defer progressCleanup() - - for i, srcLayer := range srcInfos { - copySemaphore.Acquire(ctx, 1) - go copyLayerHelper(i, srcLayer, progressPool) - } - - // Wait for all layers to be copied - copyGroup.Wait() - }() - - destInfos := make([]types.BlobInfo, numLayers) - diffIDs := make([]digest.Digest, numLayers) - for i, cld := range data { - if cld.err != nil { - return cld.err - } - destInfos[i] = cld.destInfo - diffIDs[i] = cld.diffID - } - - ic.manifestUpdates.InformationOnly.LayerInfos = destInfos - if ic.diffIDsAreNeeded { - ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs - } - if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { - ic.manifestUpdates.LayerInfos = destInfos - } - return nil -} - -// layerDigestsDiffer return true iff the digests in a and b differ (ignoring sizes and possible other fields) -func layerDigestsDiffer(a, b []types.BlobInfo) bool { - if len(a) != len(b) { - return true - } - for i := range a { - if a[i].Digest != b[i].Digest { - return true - } - } - return false -} - -// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, -// stores the resulting config and manifest to the destination, and returns the stored manifest. -func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte, error) { - pendingImage := ic.src - if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) { - if !ic.canModifyManifest { - return nil, errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") - } - if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { - // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. - // So, this can only happen if we are trying to upload using one of the other MIME type candidates. - // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise - // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. - // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. - // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. 
- return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) - } - pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates) - if err != nil { - return nil, errors.Wrap(err, "Error creating an updated image manifest") - } - pendingImage = pi - } - manifest, _, err := pendingImage.Manifest(ctx) - if err != nil { - return nil, errors.Wrap(err, "Error reading manifest") - } - - if err := ic.c.copyConfig(ctx, pendingImage); err != nil { - return nil, err - } - - ic.c.Printf("Writing manifest to image destination\n") - if err := ic.c.dest.PutManifest(ctx, manifest); err != nil { - return nil, errors.Wrap(err, "Error writing manifest") - } - return manifest, nil -} - -// newProgressPool creates a *mpb.Progress and a cleanup function. -// The caller must eventually call the returned cleanup function after the pool will no longer be updated. -func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) { - ctx, cancel := context.WithCancel(ctx) - pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx)) - return pool, func() { - cancel() - pool.Wait() - } -} - -// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter -// is ioutil.Discard, the progress bar's output will be discarded -func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar { - // shortDigestLen is the length of the digest used for blobs. - const shortDigestLen = 12 - - prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded()) - // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column. - maxPrefixLen := len("Copying blob ") + shortDigestLen - if len(prefix) > maxPrefixLen { - prefix = prefix[:maxPrefixLen] - } - - // Use a normal progress bar when we know the size (i.e., size > 0). - // Otherwise, use a spinner to indicate that something's happening. - var bar *mpb.Bar - if info.Size > 0 { - bar = pool.AddBar(info.Size, - mpb.BarClearOnComplete(), - mpb.PrependDecorators( - decor.Name(prefix), - ), - mpb.AppendDecorators( - decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete), - ), - ) - } else { - bar = pool.AddSpinner(info.Size, - mpb.SpinnerOnLeft, - mpb.BarClearOnComplete(), - mpb.SpinnerStyle([]string{".", "..", "...", "....", ""}), - mpb.PrependDecorators( - decor.Name(prefix), - ), - mpb.AppendDecorators( - decor.OnComplete(decor.Name(""), " "+onComplete), - ), - ) - } - if c.progressOutput == ioutil.Discard { - c.Printf("Copying %s %s\n", kind, info.Digest) - } - return bar -} - -// copyConfig copies config.json, if any, from src to dest. 
-func (c *copier) copyConfig(ctx context.Context, src types.Image) error { - srcInfo := src.ConfigInfo() - if srcInfo.Digest != "" { - configBlob, err := src.ConfigBlob(ctx) - if err != nil { - return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest) - } - - destInfo, err := func() (types.BlobInfo, error) { // A scope for defer - progressPool, progressCleanup := c.newProgressPool(ctx) - defer progressCleanup() - bar := c.createProgressBar(progressPool, srcInfo, "config", "done") - destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar) - if err != nil { - return types.BlobInfo{}, err - } - bar.SetTotal(int64(len(configBlob)), true) - return destInfo, nil - }() - if err != nil { - return nil - } - if destInfo.Digest != srcInfo.Digest { - return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) - } - } - return nil -} - -// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine. -// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation. -type diffIDResult struct { - digest digest.Digest - err error -} - -// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it if canCompress, -// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) { - cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" - diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" - - // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source. 
- if !diffIDIsNeeded { - reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest) - } - if reused { - logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) - bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists") - bar.SetTotal(0, true) - return blobInfo, cachedDiffID, nil - } - } - - // Fallback: copy the layer, computing the diffID if we need to do so - srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) - } - defer srcStream.Close() - - bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done") - - blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, Annotations: srcInfo.Annotations}, diffIDIsNeeded, bar) - if err != nil { - return types.BlobInfo{}, "", err - } - - diffID := cachedDiffID - if diffIDIsNeeded { - select { - case <-ctx.Done(): - return types.BlobInfo{}, "", ctx.Err() - case diffIDResult := <-diffIDChan: - if diffIDResult.err != nil { - return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID") - } - logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process - // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. - ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) - diffID = diffIDResult.digest - } - } - - bar.SetTotal(srcInfo.Size, true) - return blobInfo, diffID, nil -} - -// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. -// it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, -// perhaps compressing the stream if canCompress, -// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. -func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) { - var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil - var diffIDChan chan diffIDResult - - err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below - if diffIDIsNeeded { - diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block. - pipeReader, pipeWriter := io.Pipe() - defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily. - pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() - }() - - getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer { - // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further - // reading from the pipe has failed, we don’t really care. 
- // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it, - // the return value includes an error indication, which we do check. - // - // If this gets never called, pipeReader will not be used anywhere, but pipeWriter will only be - // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC. - go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader - return pipeWriter - } - } - blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success - return blobInfo, diffIDChan, err - // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan -} - -// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest. -func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) { - result := diffIDResult{ - digest: "", - err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"), - } - defer func() { dest <- result }() - defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead. - - result.digest, result.err = computeDiffID(layerStream, decompressor) -} - -// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. -func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) { - if decompressor != nil { - s, err := decompressor(stream) - if err != nil { - return "", err - } - defer s.Close() - stream = s - } - - return digest.Canonical.FromReader(stream) -} - -// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, -// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, -// perhaps compressing it if canCompress, -// and returns a complete blobInfo of the copied blob. -func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, - canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) { - // The copying happens through a pipeline of connected io.Readers. - // === Input: srcStream - - // === Process input through digestingReader to validate against the expected digest. - // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, - // use a separate validation failure indicator. - // Note that for this check we don't use the stronger "validationSucceeded" indicator, because - // dest.PutBlob may detect that the layer already exists, in which case we don't - // read stream to the end, and validation does not happen. - digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest) - } - var destStream io.Reader = digestingReader - - // === Detect compression of the input stream. - // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. 
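The detection step just below peeks at the stream to identify its compression and hands back a replacement reader that replays the peeked bytes. In isolation, the same helper (shown here via its v5 path, matching the rest of this bump) can be exercised like this (an illustrative sketch):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	// Build a small gzip-compressed stream to probe.
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	gz.Write([]byte("layer bytes"))
	gz.Close()

	// DetectCompressionFormat reads the stream's initial bytes and returns a
	// reader that chains them back in front of the rest of the input.
	format, decompressor, stream, err := compression.DetectCompressionFormat(&buf)
	if err != nil {
		panic(err)
	}
	if decompressor != nil {
		fmt.Println("compressed with:", format.Name()) // "gzip"
		r, err := decompressor(stream)
		if err != nil {
			panic(err)
		}
		defer r.Close()
		stream = r
	}
	data, _ := ioutil.ReadAll(stream)
	fmt.Println(string(data)) // "layer bytes"
}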
- compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) - } - isCompressed := decompressor != nil - destStream = bar.ProxyReader(destStream) - - // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. - var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. - if getOriginalLayerCopyWriter != nil { - destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor)) - originalLayerReader = destStream - } - - desiredCompressionFormat := c.compressionFormat - - // === Deal with layer compression/decompression if necessary - var inputInfo types.BlobInfo - var compressionOperation types.LayerCompression - if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { - logrus.Debugf("Compressing blob on the fly") - compressionOperation = types.Compress - pipeReader, pipeWriter := io.Pipe() - defer pipeReader.Close() - - // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, - // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, - // we don’t care. - go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter - destStream = pipeReader - inputInfo.Digest = "" - inputInfo.Size = -1 - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() { - // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally - // re-compressed using the desired format. - logrus.Debugf("Blob will be converted") - - compressionOperation = types.PreserveOriginal - s, err := decompressor(destStream) - if err != nil { - return types.BlobInfo{}, err - } - defer s.Close() - - pipeReader, pipeWriter := io.Pipe() - defer pipeReader.Close() - - go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter - - destStream = pipeReader - inputInfo.Digest = "" - inputInfo.Size = -1 - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { - logrus.Debugf("Blob will be decompressed") - compressionOperation = types.Decompress - s, err := decompressor(destStream) - if err != nil { - return types.BlobInfo{}, err - } - defer s.Close() - destStream = s - inputInfo.Digest = "" - inputInfo.Size = -1 - } else { - // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. - logrus.Debugf("Using original blob without modification") - compressionOperation = types.PreserveOriginal - inputInfo = srcInfo - } - - // === Report progress using the c.progress channel, if required. - if c.progress != nil && c.progressInterval > 0 { - destStream = &progressReader{ - source: destStream, - channel: c.progress, - interval: c.progressInterval, - artifact: srcInfo, - lastTime: time.Now(), - } - } - - // === Finally, send the layer stream to dest. 
- uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig) - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Error writing blob") - } - - uploadedInfo.Annotations = srcInfo.Annotations - - uploadedInfo.CompressionOperation = compressionOperation - // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest. - if canModifyBlob && !isConfig { - uploadedInfo.CompressionAlgorithm = &desiredCompressionFormat - } - - // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consumer - // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. - // So, read everything from originalLayerReader, which will cause the rest to be - // sent there if we are not already at EOF. - if getOriginalLayerCopyWriter != nil { - logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") - _, err := io.Copy(ioutil.Discard, originalLayerReader) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest) - } - } - - if digestingReader.validationFailed { // Coverage: This should never happen. - return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest) - } - if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest { - return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest) - } - if digestingReader.validationSucceeded { - // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: - // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader - // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob - // (because inputInfo.Digest == "", this must have been computed afresh). - switch compressionOperation { - case types.PreserveOriginal: - break // Do nothing, we have only one digest and we might not have even verified it. - case types.Compress: - c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) - case types.Decompress: - c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) - default: - return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) - } - } - return uploadedInfo, nil -} - -// compressGoroutine reads all input from src and writes its compressed equivalent to dest. -func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) { - err := errors.New("Internal error: unexpected panic in compressGoroutine") - defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. - dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() - }() - - compressor, err := compression.CompressStream(dest, compressionFormat, c.compressionLevel) - if err != nil { - return - } - defer compressor.Close() - - buf := make([]byte, compressionBufferSize) - - _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. 
causes dest.Close() -} diff --git a/vendor/github.com/containers/image/v4/copy/manifest.go b/vendor/github.com/containers/image/v4/copy/manifest.go deleted file mode 100644 index 7c981fcad..000000000 --- a/vendor/github.com/containers/image/v4/copy/manifest.go +++ /dev/null @@ -1,121 +0,0 @@ -package copy - -import ( - "context" - "strings" - - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. -// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location. -// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. -var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} - -// orderedSet is a list of strings (MIME types in our case), with each string appearing at most once. -type orderedSet struct { - list []string - included map[string]struct{} -} - -// newOrderedSet creates a correctly initialized orderedSet. -// [Sometimes it would be really nice if Golang had constructors…] -func newOrderedSet() *orderedSet { - return &orderedSet{ - list: []string{}, - included: map[string]struct{}{}, - } -} - -// append adds s to the end of os, only if it is not included already. -func (os *orderedSet) append(s string) { - if _, ok := os.included[s]; !ok { - os.list = append(os.list, s) - os.included[s] = struct{}{} - } -} - -// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest. -// Note that the conversion will only happen later, through ic.src.UpdatedImage -// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), -// and a list of other possible alternatives, in order. -func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) { - _, srcType, err := ic.src.Manifest(ctx) - if err != nil { // This should have been cached?! - return "", nil, errors.Wrap(err, "Error reading manifest") - } - normalizedSrcType := manifest.NormalizedMIMEType(srcType) - if srcType != normalizedSrcType { - logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType) - srcType = normalizedSrcType - } - - if forceManifestMIMEType != "" { - destSupportedManifestMIMETypes = []string{forceManifestMIMEType} - } - - if len(destSupportedManifestMIMETypes) == 0 { - return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. - } - supportedByDest := map[string]struct{}{} - for _, t := range destSupportedManifestMIMETypes { - supportedByDest[t] = struct{}{} - } - - // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. - // So, build a list of types to try in order of decreasing preference. - // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct, - // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other. 
- // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types - // and never attempt the other one. - prioritizedTypes := newOrderedSet() - - // First of all, prefer to keep the original manifest unmodified. - if _, ok := supportedByDest[srcType]; ok { - prioritizedTypes.append(srcType) - } - if !ic.canModifyManifest { - // We could also drop the !ic.canModifyManifest check and have the caller - // make the choice; it is already doing that to an extent, to improve error - // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” - // special case in here; the caller can then worry (or not) only about a good UI. - logrus.Debugf("We can't modify the manifest, hoping for the best...") - return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? - } - - // Then use our list of preferred types. - for _, t := range preferredManifestMIMETypes { - if _, ok := supportedByDest[t]; ok { - prioritizedTypes.append(t) - } - } - - // Finally, try anything else the destination supports. - for _, t := range destSupportedManifestMIMETypes { - prioritizedTypes.append(t) - } - - logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) - if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. - return "", nil, errors.New("Internal error: no candidate MIME types") - } - preferredType := prioritizedTypes.list[0] - if preferredType != srcType { - ic.manifestUpdates.ManifestMIMEType = preferredType - } else { - logrus.Debugf("... will first try using the original manifest unmodified") - } - return preferredType, prioritizedTypes.list[1:], nil -} - -// isMultiImage returns true if img is a list of images -func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) { - _, mt, err := img.Manifest(ctx) - if err != nil { - return false, err - } - return manifest.MIMETypeIsMultiImage(mt), nil -} diff --git a/vendor/github.com/containers/image/v4/copy/progress_reader.go b/vendor/github.com/containers/image/v4/copy/progress_reader.go deleted file mode 100644 index c6a1e3da6..000000000 --- a/vendor/github.com/containers/image/v4/copy/progress_reader.go +++ /dev/null @@ -1,28 +0,0 @@ -package copy - -import ( - "io" - "time" - - "github.com/containers/image/v4/types" -) - -// progressReader is a reader that reports its progress on an interval. -type progressReader struct { - source io.Reader - channel chan types.ProgressProperties - interval time.Duration - artifact types.BlobInfo - lastTime time.Time - offset uint64 -} - -func (r *progressReader) Read(p []byte) (int, error) { - n, err := r.source.Read(p) - r.offset += uint64(n) - if time.Since(r.lastTime) > r.interval { - r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset} - r.lastTime = time.Now() - } - return n, err -} diff --git a/vendor/github.com/containers/image/v4/copy/sign.go b/vendor/github.com/containers/image/v4/copy/sign.go deleted file mode 100644 index 64c3b4b2b..000000000 --- a/vendor/github.com/containers/image/v4/copy/sign.go +++ /dev/null @@ -1,31 +0,0 @@ -package copy - -import ( - "github.com/containers/image/v4/signature" - "github.com/containers/image/v4/transports" - "github.com/pkg/errors" -) - -// createSignature creates a new signature of manifest using keyIdentity. 
-func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { - mech, err := signature.NewGPGSigningMechanism() - if err != nil { - return nil, errors.Wrap(err, "Error initializing GPG") - } - defer mech.Close() - if err := mech.SupportsSigning(); err != nil { - return nil, errors.Wrap(err, "Signing not supported") - } - - dockerReference := c.dest.Reference().DockerReference() - if dockerReference == nil { - return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) - } - - c.Printf("Signing manifest\n") - newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) - if err != nil { - return nil, errors.Wrap(err, "Error creating signature") - } - return newSig, nil -} diff --git a/vendor/github.com/containers/image/v4/directory/directory_dest.go b/vendor/github.com/containers/image/v4/directory/directory_dest.go deleted file mode 100644 index 18f7dde70..000000000 --- a/vendor/github.com/containers/image/v4/directory/directory_dest.go +++ /dev/null @@ -1,260 +0,0 @@ -package directory - -import ( - "context" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const version = "Directory Transport Version: 1.1\n" - -// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created -// using the 'dir' transport -var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") - -type dirImageDestination struct { - ref dirReference - compress bool -} - -// newImageDestination returns an ImageDestination for writing to a directory. 
-func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) { - d := &dirImageDestination{ref: ref, compress: compress} - - // If the directory exists, check whether it is empty; - // if not empty, check whether the contents match those of a container image directory and overwrite the contents; - // if the contents don't match, throw an error - dirExists, err := pathExists(d.ref.resolvedPath) - if err != nil { - return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath) - } - if dirExists { - isEmpty, err := isDirEmpty(d.ref.resolvedPath) - if err != nil { - return nil, err - } - - if !isEmpty { - versionExists, err := pathExists(d.ref.versionPath()) - if err != nil { - return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath()) - } - if versionExists { - contents, err := ioutil.ReadFile(d.ref.versionPath()) - if err != nil { - return nil, err - } - // check if the contents of the version file are what we expect them to be - if string(contents) != version { - return nil, ErrNotContainerImageDir - } - } else { - return nil, ErrNotContainerImageDir - } - // delete directory contents so that only one image is in the directory at a time - if err = removeDirContents(d.ref.resolvedPath); err != nil { - return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath) - } - logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath) - } - } else { - // create directory if it doesn't exist - if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil { - return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath) - } - } - // create version file - err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644) - if err != nil { - return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath()) - } - return d, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *dirImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *dirImageDestination) Close() error { - return nil -} - -func (d *dirImageDestination) SupportedManifestMIMETypes() []string { - return nil -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error { - return nil -} - -func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression { - if d.compress { - return types.Compress - } - return types.PreserveOriginal -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *dirImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
-func (d *dirImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool { - return false // N/A, DockerReference() returns nil. -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *dirImageDestination) HasThreadSafePutBlob() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// May update cache. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob") - if err != nil { - return types.BlobInfo{}, err - } - succeeded := false - defer func() { - blobFile.Close() - if !succeeded { - os.Remove(blobFile.Name()) - } - }() - - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - size, err := io.Copy(blobFile, tee) - if err != nil { - return types.BlobInfo{}, err - } - computedDigest := digester.Digest() - if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) - } - if err := blobFile.Sync(); err != nil { - return types.BlobInfo{}, err - } - if err := blobFile.Chmod(0644); err != nil { - return types.BlobInfo{}, err - } - blobPath := d.ref.layerPath(computedDigest) - if err := os.Rename(blobFile.Name(), blobPath); err != nil { - return types.BlobInfo{}, err - } - succeeded = true - return types.BlobInfo{Digest: computedDigest, Size: size}, nil -} - -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. -// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache.
-func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - if info.Digest == "" { - return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) - } - blobPath := d.ref.layerPath(info.Digest) - finfo, err := os.Stat(blobPath) - if err != nil && os.IsNotExist(err) { - return false, types.BlobInfo{}, nil - } - if err != nil { - return false, types.BlobInfo{}, err - } - return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema), -// and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. -func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte) error { - return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644) -} - -func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { - for i, sig := range signatures { - if err := ioutil.WriteFile(d.ref.signaturePath(i), sig, 0644); err != nil { - return err - } - } - return nil -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *dirImageDestination) Commit(ctx context.Context) error { - return nil -} - -// returns true if path exists -func pathExists(path string) (bool, error) { - _, err := os.Stat(path) - if err == nil { - return true, nil - } - if err != nil && os.IsNotExist(err) { - return false, nil - } - return false, err -} - -// returns true if directory is empty -func isDirEmpty(path string) (bool, error) { - files, err := ioutil.ReadDir(path) - if err != nil { - return false, err - } - return len(files) == 0, nil -} - -// deletes the contents of a directory -func removeDirContents(path string) error { - files, err := ioutil.ReadDir(path) - if err != nil { - return err - } - - for _, file := range files { - if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/containers/image/v4/directory/directory_src.go b/vendor/github.com/containers/image/v4/directory/directory_src.go deleted file mode 100644 index 921c1941c..000000000 --- a/vendor/github.com/containers/image/v4/directory/directory_src.go +++ /dev/null @@ -1,96 +0,0 @@ -package directory - -import ( - "context" - "io" - "io/ioutil" - "os" - - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type dirImageSource struct { - ref dirReference -} - -// newImageSource returns an ImageSource reading from an existing directory. -// The caller must call .Close() on the returned ImageSource. -func newImageSource(ref dirReference) types.ImageSource { - return &dirImageSource{ref} -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims).
This can be used e.g. to determine which public keys are trusted for this image. -func (s *dirImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *dirImageSource) Close() error { - return nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`) - } - m, err := ioutil.ReadFile(s.ref.manifestPath()) - if err != nil { - return nil, "", err - } - return m, manifest.GuessMIMEType(m), err -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *dirImageSource) HasThreadSafeGetBlob() bool { - return false -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - r, err := os.Open(s.ref.layerPath(info.Digest)) - if err != nil { - return nil, -1, err - } - fi, err := r.Stat() - if err != nil { - return nil, -1, err - } - return r, fi.Size(), nil -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - return nil, errors.Errorf(`Manifest lists are not supported by "dir:"`) - } - signatures := [][]byte{} - for i := 0; ; i++ { - signature, err := ioutil.ReadFile(s.ref.signaturePath(i)) - if err != nil { - if os.IsNotExist(err) { - break - } - return nil, err - } - signatures = append(signatures, signature) - } - return signatures, nil -} - -// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified.
-func (s *dirImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/v4/directory/directory_transport.go b/vendor/github.com/containers/image/v4/directory/directory_transport.go deleted file mode 100644 index 29ac7115f..000000000 --- a/vendor/github.com/containers/image/v4/directory/directory_transport.go +++ /dev/null @@ -1,187 +0,0 @@ -package directory - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - "github.com/containers/image/v4/directory/explicitfilepath" - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/image" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for directory paths. -var Transport = dirTransport{} - -type dirTransport struct{} - -func (t dirTransport) Name() string { - return "dir" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) { - return NewReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { - if !strings.HasPrefix(scope, "/") { - return errors.Errorf("Invalid scope %s: Must be an absolute path", scope) - } - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - cleaned := filepath.Clean(scope) - if cleaned != scope { - return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) - } - return nil -} - -// dirReference is an ImageReference for directory paths. -type dirReference struct { - // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! - // Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on. - - // Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid - // being exposed to symlinks and renames in the parent directories to the working directory). - // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) - path string // As specified by the user. May be relative, contain symlinks, etc. - resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. -} - -// There is no directory.ParseReference because it is rather pointless. -// Callers who need a transport-independent interface will go through -// dirTransport.ParseReference; callers who intentionally deal with directories -// can use directory.NewReference. 
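[Editorial sketch: to make the policy-scope semantics concrete, here is a standalone illustration of the prefix walk that PolicyConfigurationNamespaces below performs on resolvedPath. The package and helper names are invented, not vendored code.]

package direxample

import "strings"

// namespacesFor derives policy namespaces from a resolved path, most
// specific first; e.g. "/var/lib/images/busybox" yields
// ["/var/lib/images", "/var/lib", "/var"]. "/" itself is deliberately
// omitted, since it is rejected by ValidatePolicyConfigurationScope above.
func namespacesFor(resolvedPath string) []string {
	res := []string{}
	path := resolvedPath
	for {
		lastSlash := strings.LastIndex(path, "/")
		if lastSlash == -1 || lastSlash == 0 {
			break
		}
		path = path[:lastSlash]
		res = append(res, path)
	}
	return res
}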
- -// NewReference returns a directory reference for a specified path. -// -// We do not expose an API supplying the resolvedPath; we could, but recomputing it -// is generally cheap enough that we prefer being confident about the properties of resolvedPath. -func NewReference(path string) (types.ImageReference, error) { - resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path) - if err != nil { - return nil, err - } - return dirReference{path: path, resolvedPath: resolved}, nil -} - -func (ref dirReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref dirReference) StringWithinTransport() string { - return ref.path -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref dirReference) DockerReference() reference.Named { - return nil -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref dirReference) PolicyConfigurationIdentity() string { - return ref.resolvedPath -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref dirReference) PolicyConfigurationNamespaces() []string { - res := []string{} - path := ref.resolvedPath - for { - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 || lastSlash == 0 { - break - } - path = path[:lastSlash] - res = append(res, path) - } - // Note that we do not include "/"; it is redundant with the default "" global default, - // and rejected by dirTransport.ValidatePolicyConfigurationScope above. - return res -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. 
-// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src := newImageSource(ref) - return image.FromSource(ctx, sys, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ref), nil -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - compress := false - if sys != nil { - compress = sys.DirForceCompress - } - return newImageDestination(ref, compress) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for dir: images") -} - -// manifestPath returns a path for the manifest within a directory using our conventions. -func (ref dirReference) manifestPath() string { - return filepath.Join(ref.path, "manifest.json") -} - -// layerPath returns a path for a layer tarball within a directory using our conventions. -func (ref dirReference) layerPath(digest digest.Digest) string { - // FIXME: Should we keep the digest identification? - return filepath.Join(ref.path, digest.Hex()) -} - -// signaturePath returns a path for a signature within a directory using our conventions. -func (ref dirReference) signaturePath(index int) string { - return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)) -} - -// versionPath returns a path for the version file within a directory using our conventions. -func (ref dirReference) versionPath() string { - return filepath.Join(ref.path, "version") -} diff --git a/vendor/github.com/containers/image/v4/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v4/directory/explicitfilepath/path.go deleted file mode 100644 index 71136b880..000000000 --- a/vendor/github.com/containers/image/v4/directory/explicitfilepath/path.go +++ /dev/null @@ -1,56 +0,0 @@ -package explicitfilepath - -import ( - "os" - "path/filepath" - - "github.com/pkg/errors" -) - -// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path. -// To do so, all elements of the input path must exist; as a special case, the final component may be -// a non-existent name (but not a symlink pointing to a non-existent name). -// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc. -func ResolvePathToFullyExplicit(path string) (string, error) { - switch _, err := os.Lstat(path); { - case err == nil: - return resolveExistingPathToFullyExplicit(path) - case os.IsNotExist(err): - parent, file := filepath.Split(path) - resolvedParent, err := resolveExistingPathToFullyExplicit(parent) - if err != nil { - return "", err - } - if file == "." || file == ".."
{ - // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well. - // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed. - // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components - // in the resulting path, and especially not at the end. - return "", errors.Errorf("Unexpectedly missing special filename component in %s", path) - } - resolvedPath := filepath.Join(resolvedParent, file) - // As a sanity check, ensure that there are no "." or ".." components. - cleanedResolvedPath := filepath.Clean(resolvedPath) - if cleanedResolvedPath != resolvedPath { - // Coverage: This should never happen. - return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) - } - return resolvedPath, nil - default: // err != nil, unrecognized - return "", err - } -} - -// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit, -// but without the special case for missing final component. -func resolveExistingPathToFullyExplicit(path string) (string, error) { - resolved, err := filepath.Abs(path) - if err != nil { - return "", err // Coverage: This can fail only if os.Getwd() fails. - } - resolved, err = filepath.EvalSymlinks(resolved) - if err != nil { - return "", err - } - return filepath.Clean(resolved), nil -} diff --git a/vendor/github.com/containers/image/v4/docker/archive/dest.go b/vendor/github.com/containers/image/v4/docker/archive/dest.go deleted file mode 100644 index 9e06e7c96..000000000 --- a/vendor/github.com/containers/image/v4/docker/archive/dest.go +++ /dev/null @@ -1,72 +0,0 @@ -package archive - -import ( - "context" - "io" - "os" - - "github.com/containers/image/v4/docker/tarfile" - "github.com/containers/image/v4/types" - "github.com/pkg/errors" -) - -type archiveImageDestination struct { - *tarfile.Destination // Implements most of types.ImageDestination - ref archiveReference - writer io.Closer -} - -func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) { - // ref.path can be either a pipe or a regular file - // in the case of a pipe, we require that we can open it for write - // in the case of a regular file, we don't want to overwrite any pre-existing file - // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy, - // only in a different way. Either way, it’s up to the user to not have two writers to the same path.) 
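[Editorial sketch: the open-then-stat dance described above can be read in isolation. A standalone illustration under stated assumptions: stdlib errors instead of pkg/errors, and an invented function name.]

package archiveexample

import (
	"errors"
	"fmt"
	"os"
)

// openArchiveForWrite opens path with O_WRONLY|O_CREATE but no O_TRUNC, so it
// works for both pipes and regular files, then refuses a regular file that
// already has content, the same best-effort, admittedly racy check as above.
func openArchiveForWrite(path string) (*os.File, error) {
	fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return nil, fmt.Errorf("opening file %q: %v", path, err)
	}
	st, err := fh.Stat()
	if err != nil {
		fh.Close()
		return nil, err
	}
	if st.Mode().IsRegular() && st.Size() != 0 {
		fh.Close()
		return nil, errors.New("refusing to overwrite an existing, non-empty file")
	}
	return fh, nil
}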
- fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, errors.Wrapf(err, "error opening file %q", ref.path) - } - - fhStat, err := fh.Stat() - if err != nil { - return nil, errors.Wrapf(err, "error statting file %q", ref.path) - } - - if fhStat.Mode().IsRegular() && fhStat.Size() != 0 { - return nil, errors.New("docker-archive doesn't support modifying existing images") - } - - tarDest := tarfile.NewDestination(fh, ref.destinationRef) - if sys != nil && sys.DockerArchiveAdditionalTags != nil { - tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags) - } - return &archiveImageDestination{ - Destination: tarDest, - ref: ref, - writer: fh, - }, nil -} - -// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved -func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression { - return types.Decompress -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *archiveImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *archiveImageDestination) Close() error { - return d.writer.Close() -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *archiveImageDestination) Commit(ctx context.Context) error { - return d.Destination.Commit(ctx) -} diff --git a/vendor/github.com/containers/image/v4/docker/archive/src.go b/vendor/github.com/containers/image/v4/docker/archive/src.go deleted file mode 100644 index feea0decd..000000000 --- a/vendor/github.com/containers/image/v4/docker/archive/src.go +++ /dev/null @@ -1,40 +0,0 @@ -package archive - -import ( - "context" - "github.com/containers/image/v4/docker/tarfile" - "github.com/containers/image/v4/types" - "github.com/sirupsen/logrus" -) - -type archiveImageSource struct { - *tarfile.Source // Implements most of types.ImageSource - ref archiveReference -} - -// newImageSource returns a types.ImageSource for the specified image reference. -// The caller must call .Close() on the returned ImageSource. -func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSource, error) { - if ref.destinationRef != nil { - logrus.Warnf("docker-archive: references are not supported for sources (ignoring)") - } - src, err := tarfile.NewSourceFromFile(ref.path) - if err != nil { - return nil, err - } - return &archiveImageSource{ - Source: src, - ref: ref, - }, nil -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *archiveImageSource) Reference() types.ImageReference { - return s.ref -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
-func (s *archiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/v4/docker/archive/transport.go b/vendor/github.com/containers/image/v4/docker/archive/transport.go deleted file mode 100644 index 347fdbd6e..000000000 --- a/vendor/github.com/containers/image/v4/docker/archive/transport.go +++ /dev/null @@ -1,160 +0,0 @@ -package archive - -import ( - "context" - "fmt" - "strings" - - "github.com/containers/image/v4/docker/reference" - ctrImage "github.com/containers/image/v4/image" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for local Docker archives. -var Transport = archiveTransport{} - -type archiveTransport struct{} - -func (t archiveTransport) Name() string { - return "docker-archive" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error { - // See the explanation in archiveReference.PolicyConfigurationIdentity. - return errors.New(`docker-archive: does not support any scopes except the default "" one`) -} - -// archiveReference is an ImageReference for Docker images. -type archiveReference struct { - // only used for destinations - // archiveReference.destinationRef is optional and can be nil for destinations as well. - destinationRef reference.NamedTagged - path string -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference. -func ParseReference(refString string) (types.ImageReference, error) { - if refString == "" { - return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString) - } - - parts := strings.SplitN(refString, ":", 2) - path := parts[0] - var destinationRef reference.NamedTagged - - // A :tag was specified, which is only necessary for destinations. - if len(parts) == 2 { - ref, err := reference.ParseNormalizedNamed(parts[1]) - if err != nil { - return nil, errors.Wrapf(err, "docker-archive parsing reference") - } - ref = reference.TagNameOnly(ref) - - if _, isDigest := ref.(reference.Canonical); isDigest { - return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString) - } - - refTagged, isTagged := ref.(reference.NamedTagged) - if !isTagged { - // Really shouldn't be hit...
- return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString) - } - destinationRef = refTagged - } - - return archiveReference{ - destinationRef: destinationRef, - path: path, - }, nil -} - -func (ref archiveReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref archiveReference) StringWithinTransport() string { - if ref.destinationRef == nil { - return ref.path - } - return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String()) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref archiveReference) DockerReference() reference.Named { - return ref.destinationRef -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref archiveReference) PolicyConfigurationIdentity() string { - // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity. - return "" -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref archiveReference) PolicyConfigurationNamespaces() []string { - // TODO - return []string{} -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
-func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, ref) - if err != nil { - return nil, err - } - return ctrImage.FromSource(ctx, sys, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref archiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(sys, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref archiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - // Not really supported, for safety reasons. - return errors.New("Deleting images not implemented for docker-archive: images") -} diff --git a/vendor/github.com/containers/image/v4/docker/cache.go b/vendor/github.com/containers/image/v4/docker/cache.go deleted file mode 100644 index 51bf5b0d3..000000000 --- a/vendor/github.com/containers/image/v4/docker/cache.go +++ /dev/null @@ -1,23 +0,0 @@ -package docker - -import ( - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/types" -) - -// bicTransportScope returns a BICTransportScope appropriate for ref. -func bicTransportScope(ref dockerReference) types.BICTransportScope { - // Blobs can be reused across the whole registry. - return types.BICTransportScope{Opaque: reference.Domain(ref.ref)} -} - -// newBICLocationReference returns a BICLocationReference appropriate for ref. -func newBICLocationReference(ref dockerReference) types.BICLocationReference { - // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob). - return types.BICLocationReference{Opaque: ref.ref.Name()} -} - -// parseBICLocationReference returns a repository for encoded lr. -func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) { - return reference.ParseNormalizedNamed(lr.Opaque) -} diff --git a/vendor/github.com/containers/image/v4/docker/daemon/client.go b/vendor/github.com/containers/image/v4/docker/daemon/client.go deleted file mode 100644 index 94c4970f2..000000000 --- a/vendor/github.com/containers/image/v4/docker/daemon/client.go +++ /dev/null @@ -1,85 +0,0 @@ -package daemon - -import ( - "net/http" - "path/filepath" - - "github.com/containers/image/v4/types" - dockerclient "github.com/docker/docker/client" - "github.com/docker/go-connections/tlsconfig" -) - -const ( - // The default API version to be used in case none is explicitly specified - defaultAPIVersion = "1.22" -) - -// NewDockerClient initializes a new API client based on the passed SystemContext. -func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { - host := dockerclient.DefaultDockerHost - if sys != nil && sys.DockerDaemonHost != "" { - host = sys.DockerDaemonHost - } - - // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient. 
- // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s - // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket - // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. - // - // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client. - // - // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set - // TLSClientConfig to nil. This can be achieved by using the form `http://` - url, err := dockerclient.ParseHostURL(host) - if err != nil { - return nil, err - } - var httpClient *http.Client - if url.Scheme != "unix" { - if url.Scheme == "http" { - httpClient = httpConfig() - } else { - hc, err := tlsConfig(sys) - if err != nil { - return nil, err - } - httpClient = hc - } - } - - return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) -} - -func tlsConfig(sys *types.SystemContext) (*http.Client, error) { - options := tlsconfig.Options{} - if sys != nil && sys.DockerDaemonInsecureSkipTLSVerify { - options.InsecureSkipVerify = true - } - - if sys != nil && sys.DockerDaemonCertPath != "" { - options.CAFile = filepath.Join(sys.DockerDaemonCertPath, "ca.pem") - options.CertFile = filepath.Join(sys.DockerDaemonCertPath, "cert.pem") - options.KeyFile = filepath.Join(sys.DockerDaemonCertPath, "key.pem") - } - - tlsc, err := tlsconfig.Client(options) - if err != nil { - return nil, err - } - - return &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsc, - }, - CheckRedirect: dockerclient.CheckRedirect, - }, nil -} - -func httpConfig() *http.Client { - return &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: nil, - }, - CheckRedirect: dockerclient.CheckRedirect, - } -} diff --git a/vendor/github.com/containers/image/v4/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v4/docker/daemon/daemon_dest.go deleted file mode 100644 index 2c56ab934..000000000 --- a/vendor/github.com/containers/image/v4/docker/daemon/daemon_dest.go +++ /dev/null @@ -1,144 +0,0 @@ -package daemon - -import ( - "context" - "io" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/docker/tarfile" - "github.com/containers/image/v4/types" - "github.com/docker/docker/client" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type daemonImageDestination struct { - ref daemonReference - mustMatchRuntimeOS bool - *tarfile.Destination // Implements most of types.ImageDestination - // For talking to imageLoadGoroutine - goroutineCancel context.CancelFunc - statusChannel <-chan error - writer *io.PipeWriter - // Other state - committed bool // writer has been closed -} - -// newImageDestination returns a types.ImageDestination for the specified image reference. 
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { - if ref.ref == nil { - return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) - } - namedTaggedRef, ok := ref.ref.(reference.NamedTagged) - if !ok { - return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) - } - - var mustMatchRuntimeOS = true - if sys != nil && sys.DockerDaemonHost != client.DefaultDockerHost { - mustMatchRuntimeOS = false - } - - c, err := newDockerClient(sys) - if err != nil { - return nil, errors.Wrap(err, "Error initializing docker engine client") - } - - reader, writer := io.Pipe() - // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. - statusChannel := make(chan error, 1) - - goroutineContext, goroutineCancel := context.WithCancel(ctx) - go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) - - return &daemonImageDestination{ - ref: ref, - mustMatchRuntimeOS: mustMatchRuntimeOS, - Destination: tarfile.NewDestination(writer, namedTaggedRef), - goroutineCancel: goroutineCancel, - statusChannel: statusChannel, - writer: writer, - committed: false, - }, nil -} - -// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel -func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) { - err := errors.New("Internal error: unexpected panic in imageLoadGoroutine") - defer func() { - logrus.Debugf("docker-daemon: sending done, status %v", err) - statusChannel <- err - }() - defer func() { - if err == nil { - reader.Close() - } else { - reader.CloseWithError(err) - } - }() - - resp, err := c.ImageLoad(ctx, reader, true) - if err != nil { - err = errors.Wrap(err, "Error saving image to docker engine") - return - } - defer resp.Body.Close() -} - -// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved -func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression { - return types.PreserveOriginal -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *daemonImageDestination) MustMatchRuntimeOS() bool { - return d.mustMatchRuntimeOS -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *daemonImageDestination) Close() error { - if !d.committed { - logrus.Debugf("docker-daemon: Closing tar stream to abort loading") - // In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing. - // In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including - // https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the - // net/http version with native Context support in Go 1.7) do not always actually immediately cancel - // the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and - // return early if the context is canceled without terminating the goroutine at all. 
- // So we need this CloseWithError to terminate sending the HTTP request Body - // immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending - // the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all. - // Whether that works or not, closing the PipeWriter seems desirable in any case. - d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")) - } - d.goroutineCancel() - - return nil -} - -func (d *daemonImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *daemonImageDestination) Commit(ctx context.Context) error { - logrus.Debugf("docker-daemon: Closing tar stream") - if err := d.Destination.Commit(ctx); err != nil { - return err - } - if err := d.writer.Close(); err != nil { - return err - } - d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine. - - logrus.Debugf("docker-daemon: Waiting for status") - select { - case <-ctx.Done(): - return ctx.Err() - case err := <-d.statusChannel: - return err - } -} diff --git a/vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go deleted file mode 100644 index f6f60aaf9..000000000 --- a/vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go +++ /dev/null @@ -1,62 +0,0 @@ -package daemon - -import ( - "context" - - "github.com/containers/image/v4/docker/tarfile" - "github.com/containers/image/v4/types" - "github.com/pkg/errors" -) - -type daemonImageSource struct { - ref daemonReference - *tarfile.Source // Implements most of types.ImageSource -} - -type layerInfo struct { - path string - size int64 -} - -// newImageSource returns a types.ImageSource for the specified image reference. -// The caller must call .Close() on the returned ImageSource. -// -// It would be great if we were able to stream the input tar as it is being -// sent; but Docker sends the top-level manifest, which determines which paths -// to look for, at the end, so we will need to seek back and re-read, several times. -// (We could, perhaps, expect an exact sequence, assume that the first plaintext file -// is the config, and that the following len(RootFS) files are the layers, but that feels -// way too brittle.)
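[Editorial sketch: the docker-daemon destination above coordinates three parties: a producer writing the tar stream, imageLoadGoroutine consuming it, and Commit() waiting on a buffered status channel. This is a distilled standalone illustration, not the vendored code; the names are invented, and io.Copy to ioutil.Discard stands in for the ImageLoad call.]

package daemonexample

import (
	"context"
	"io"
	"io/ioutil"
)

// loadViaPipe writes whatever produce emits into an io.Pipe, lets a goroutine
// drain the read end, and reports the consumer's status on a buffered channel
// so the goroutine can always finish, even if nobody ends up waiting.
func loadViaPipe(ctx context.Context, produce func(io.Writer) error) error {
	reader, writer := io.Pipe()
	status := make(chan error, 1) // buffered: the send must never block

	go func() {
		_, err := io.Copy(ioutil.Discard, reader) // stand-in for c.ImageLoad(ctx, reader, true)
		if err == nil {
			reader.Close()
		} else {
			reader.CloseWithError(err)
		}
		status <- err
	}()

	if err := produce(writer); err != nil {
		writer.CloseWithError(err) // unblock the consumer with our error
		return err
	}
	if err := writer.Close(); err != nil {
		return err
	}
	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-status:
		return err
	}
}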
diff --git a/vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go
deleted file mode 100644
index f6f60aaf9..000000000
--- a/vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package daemon
-
-import (
-	"context"
-
-	"github.com/containers/image/v4/docker/tarfile"
-	"github.com/containers/image/v4/types"
-	"github.com/pkg/errors"
-)
-
-type daemonImageSource struct {
-	ref daemonReference
-	*tarfile.Source // Implements most of types.ImageSource
-}
-
-type layerInfo struct {
-	path string
-	size int64
-}
-
-// newImageSource returns a types.ImageSource for the specified image reference.
-// The caller must call .Close() on the returned ImageSource.
-//
-// It would be great if we were able to stream the input tar as it is being
-// sent; but Docker sends the top-level manifest, which determines which paths
-// to look for, at the end, so we will need to seek back and re-read, several times.
-// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
-// is the config, and that the following len(RootFS) files are the layers, but that feels
-// way too brittle.)
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
-	c, err := newDockerClient(sys)
-	if err != nil {
-		return nil, errors.Wrap(err, "Error initializing docker engine client")
-	}
-	// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
-	// Either way ImageSave should create a tarball with exactly one image.
-	inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
-	if err != nil {
-		return nil, errors.Wrap(err, "Error loading image from docker engine")
-	}
-	defer inputStream.Close()
-
-	src, err := tarfile.NewSourceFromStream(inputStream)
-	if err != nil {
-		return nil, err
-	}
-	return &daemonImageSource{
-		ref:    ref,
-		Source: src,
-	}, nil
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (s *daemonImageSource) Reference() types.ImageReference {
-	return s.ref
-}
-
-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *daemonImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
-	return nil, nil
-}
diff --git a/vendor/github.com/containers/image/v4/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v4/docker/daemon/daemon_transport.go
deleted file mode 100644
index 4c6986ba0..000000000
--- a/vendor/github.com/containers/image/v4/docker/daemon/daemon_transport.go
+++ /dev/null
@@ -1,223 +0,0 @@
-package daemon
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/containers/image/v4/docker/policyconfiguration"
-	"github.com/containers/image/v4/docker/reference"
-	"github.com/containers/image/v4/image"
-	"github.com/containers/image/v4/transports"
-	"github.com/containers/image/v4/types"
-	"github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
-)
-
-func init() {
-	transports.Register(Transport)
-}
-
-// Transport is an ImageTransport for images managed by a local Docker daemon.
-var Transport = daemonTransport{}
-
-type daemonTransport struct{}
-
-// Name returns the name of the transport, which must be unique among other transports.
-func (t daemonTransport) Name() string {
-	return "docker-daemon"
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
-func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) {
-	return ParseReference(reference)
-}
-
-// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
-// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
-// scope passed to this function will not be ""; that value is always allowed.
-func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
-	// ID values cannot be effectively namespaced, and are clearly invalid host:port values.
-	if _, err := digest.Parse(scope); err == nil {
-		return errors.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope)
-	}
-
-	// FIXME? We could be verifying the various character set and length restrictions
-	// from docker/distribution/reference.regexp.go, but other than that there
-	// are few semantically invalid strings.
-	return nil
-}
-
-// daemonReference is an ImageReference for images managed by a local Docker daemon.
-// Exactly one of id and ref can be set.
-// For daemonImageSource, both id and ref are acceptable, but ref must not be a NameOnly (interpreted as all tags in that repository by the daemon)
-// For daemonImageDestination, it must be a ref, which is NamedTagged.
-// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. -// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) -type daemonReference struct { - id digest.Digest - ref reference.Named // !reference.IsNameOnly -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func ParseReference(refString string) (types.ImageReference, error) { - // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases. - // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars). - - // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). - // reference.ParseAnyReference interprets such strings as digests. - if dgst, err := digest.Parse(refString); err == nil { - // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. - // Other digest references are ambiguous, so refuse them. - if dgst.Algorithm() != digest.Canonical { - return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) - } - return NewReference(dgst, nil) - } - - ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values - if err != nil { - return nil, err - } - if reference.FamiliarName(ref) == digest.Canonical.String() { - return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) - } - return NewReference("", ref) -} - -// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) -func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { - if id != "" && ref != nil { - return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") - } - if ref != nil { - if reference.IsNameOnly(ref) { - return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) - } - // A github.com/distribution/reference value can have a tag and a digest at the same time! - // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input. - // This MAY be accepted in the future. - // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop - // the tag or the digest first?) - _, isTagged := ref.(reference.NamedTagged) - _, isDigested := ref.(reference.Canonical) - if isTagged && isDigested { - return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported") - } - } - return daemonReference{ - id: id, - ref: ref, - }, nil -} - -func (ref daemonReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. 
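
The acceptance rules that ParseReference and NewReference implement can be exercised from client code. A minimal sketch, assuming the containers/image/v5 import path this commit moves to (the deleted v4 path behaves the same way):

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/daemon"
)

func main() {
	for _, s := range []string{
		// Image ID (config digest) with the canonical algorithm: accepted.
		"sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
		"busybox:latest", // name:tag: accepted
		"busybox",        // name only: rejected, it has neither a tag nor a digest
		// A tag and a digest at the same time: currently rejected.
		"busybox:latest@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
	} {
		ref, err := daemon.ParseReference(s)
		if err != nil {
			fmt.Printf("%s: rejected: %v\n", s, err)
			continue
		}
		fmt.Printf("%s: accepted as %s\n", s, ref.StringWithinTransport())
	}
}
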
-// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
-// WARNING: Do not use the return value in the UI to describe an image; it does not contain the Transport().Name() prefix;
-// instead, see transports.ImageName().
-func (ref daemonReference) StringWithinTransport() string {
-	switch {
-	case ref.id != "":
-		return ref.id.String()
-	case ref.ref != nil:
-		return reference.FamiliarString(ref.ref)
-	default: // Coverage: Should never happen, NewReference above should refuse such values.
-		panic("Internal inconsistency: daemonReference has empty id and nil ref")
-	}
-}
-
-// DockerReference returns a Docker reference associated with this reference
-// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
-// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
-func (ref daemonReference) DockerReference() reference.Named {
-	return ref.ref // May be nil
-}
-
-// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
-// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
-// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
-// (i.e. various references with exactly the same semantics should return the same configuration identity)
-// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
-// not required/guaranteed that it will be a valid input to Transport().ParseReference().
-// Returns "" if configuration identities for these references are not supported.
-func (ref daemonReference) PolicyConfigurationIdentity() string {
-	// We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible.
-	// But the existence of image IDs means that we can’t truly namespace the input well:
-	// a single image can be namespaced either using the name or the ID depending on how it is named.
-	//
-	// That’s fairly unexpected, but we have to cope somehow.
-	//
-	// So, use the ordinary docker/policyconfiguration namespacing for named images.
-	// Image IDs all fall into the root namespace.
-	// Users can set up the root namespace to be either untrusted or rejected,
-	// and set up specific trust for named namespaces. This allows verifying image
-	// identity when a name is known, and unnamed images would be untrusted or rejected.
-	switch {
-	case ref.id != "":
-		return "" // This still allows using the default "" scope to define a global policy for ID-identified images.
-	case ref.ref != nil:
-		res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
-		if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
-			panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
-		}
-		return res
-	default: // Coverage: Should never happen, NewReference above should refuse such values.
-		panic("Internal inconsistency: daemonReference has empty id and nil ref")
-	}
-}
-
-// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
-// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
-// in order, terminating on first match, and an implicit "" is always checked at the end.
-// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
-// and each following element to be a prefix of the element preceding it.
-func (ref daemonReference) PolicyConfigurationNamespaces() []string {
-	// See the explanation in daemonReference.PolicyConfigurationIdentity.
-	switch {
-	case ref.id != "":
-		return []string{}
-	case ref.ref != nil:
-		return policyconfiguration.DockerReferenceNamespaces(ref.ref)
-	default: // Coverage: Should never happen, NewReference above should refuse such values.
-		panic("Internal inconsistency: daemonReference has empty id and nil ref")
-	}
-}
-
-// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned ImageCloser.
-// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
-// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
-func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
-	src, err := newImageSource(ctx, sys, ref)
-	if err != nil {
-		return nil, err
-	}
-	return image.FromSource(ctx, sys, src)
-}
-
-// NewImageSource returns a types.ImageSource for this reference.
-// The caller must call .Close() on the returned ImageSource.
-func (ref daemonReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
-	return newImageSource(ctx, sys, ref)
-}
-
-// NewImageDestination returns a types.ImageDestination for this reference.
-// The caller must call .Close() on the returned ImageDestination.
-func (ref daemonReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
-	return newImageDestination(ctx, sys, ref)
-}
-
-// DeleteImage deletes the named image from the registry, if supported.
-func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
-	// Should this just untag the image? Should this stop running containers?
-	// The semantics are not quite as clear as for remote repositories.
-	// The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant.
- return errors.Errorf("Deleting images not implemented for docker-daemon: images") -} diff --git a/vendor/github.com/containers/image/v4/docker/docker_client.go b/vendor/github.com/containers/image/v4/docker/docker_client.go deleted file mode 100644 index d5662a030..000000000 --- a/vendor/github.com/containers/image/v4/docker/docker_client.go +++ /dev/null @@ -1,645 +0,0 @@ -package docker - -import ( - "context" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/pkg/docker/config" - "github.com/containers/image/v4/pkg/sysregistriesv2" - "github.com/containers/image/v4/pkg/tlsclientconfig" - "github.com/containers/image/v4/types" - "github.com/docker/distribution/registry/client" - "github.com/docker/go-connections/tlsconfig" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - dockerHostname = "docker.io" - dockerV1Hostname = "index.docker.io" - dockerRegistry = "registry-1.docker.io" - - resolvedPingV2URL = "%s://%s/v2/" - resolvedPingV1URL = "%s://%s/v1/_ping" - tagsPath = "/v2/%s/tags/list" - manifestPath = "/v2/%s/manifests/%s" - blobsPath = "/v2/%s/blobs/%s" - blobUploadPath = "/v2/%s/blobs/uploads/" - extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" - - minimumTokenLifetimeSeconds = 60 - - extensionSignatureSchemaVersion = 2 // extensionSignature.Version - extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type -) - -var ( - // ErrV1NotSupported is returned when we're trying to talk to a - // docker V1 registry. - ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") - // ErrUnauthorizedForCredentials is returned when the status code returned is 401 - ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password") - systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} -) - -// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: -// signature represents a Docker image signature. -type extensionSignature struct { - Version int `json:"schemaVersion"` // Version specifies the schema version - Name string `json:"name"` // Name must be in "sha256:@signatureName" format - Type string `json:"type"` // Type is optional, of not set it will be defaulted to "AtomicImageV1" - Content []byte `json:"content"` // Content contains the signature -} - -// signatureList represents list of Docker image signatures. -type extensionSignatureList struct { - Signatures []extensionSignature `json:"signatures"` -} - -type bearerToken struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - expirationTime time.Time -} - -// dockerClient is configuration for dealing with a single Docker registry. -type dockerClient struct { - // The following members are set by newDockerClient and do not change afterwards. - sys *types.SystemContext - registry string - - // tlsClientConfig is setup by newDockerClient and will be used and updated - // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime. - tlsClientConfig *tls.Config - // The following members are not set by newDockerClient and must be set by callers if needed. 
-	username      string
-	password      string
-	signatureBase signatureStorageBase
-	scope         authScope
-
-	// The following members are detected registry properties:
-	// They are set after a successful detectProperties(), and never change afterwards.
-	client             *http.Client
-	scheme             string
-	challenges         []challenge
-	supportsSignatures bool
-
-	// Private state for setupRequestAuth (key: string, value: bearerToken)
-	tokenCache sync.Map
-	// Private state for detectProperties:
-	detectPropertiesOnce  sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
-	detectPropertiesError error     // detectPropertiesError caches the initial error.
-}
-
-type authScope struct {
-	remoteName string
-	actions    string
-}
-
-// sendAuth determines whether we need authentication for the v2 or v1 endpoint.
-type sendAuth int
-
-const (
-	// v2 endpoint with authentication.
-	v2Auth sendAuth = iota
-	// v1 endpoint with authentication.
-	// TODO: Get v1Auth working
-	// v1Auth
-	// no authentication, works for both v1 and v2.
-	noAuth
-)
-
-func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
-	token := new(bearerToken)
-	if err := json.Unmarshal(blob, &token); err != nil {
-		return nil, err
-	}
-	if token.Token == "" {
-		token.Token = token.AccessToken
-	}
-	if token.ExpiresIn < minimumTokenLifetimeSeconds {
-		token.ExpiresIn = minimumTokenLifetimeSeconds
-		logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
-	}
-	if token.IssuedAt.IsZero() {
-		token.IssuedAt = time.Now().UTC()
-	}
-	token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
-	return token, nil
-}
-
-// this is cloned from docker/go-connections because upstream docker has changed
-// it and make deps here fails otherwise.
-// We'll drop this once we upgrade to docker 1.13.x deps.
-func serverDefault() *tls.Config {
-	return &tls.Config{
-		// Avoid fallback to SSL protocols < TLS1.0
-		MinVersion:               tls.VersionTLS10,
-		PreferServerCipherSuites: true,
-		CipherSuites:             tlsconfig.DefaultServerAcceptedCiphers,
-	}
-}
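
The defaulting performed by newBearerTokenFromJSONBlob above can be tried in isolation. A minimal sketch with a made-up token response; the struct mirrors the JSON fields above:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type bearerToken struct {
	Token       string    `json:"token"`
	AccessToken string    `json:"access_token"`
	ExpiresIn   int       `json:"expires_in"`
	IssuedAt    time.Time `json:"issued_at"`
}

func main() {
	// A registry may send "access_token" instead of "token", and may omit
	// "issued_at" or send a tiny "expires_in"; the normalization fills those in.
	blob := []byte(`{"access_token": "secret", "expires_in": 10}`)
	var t bearerToken
	if err := json.Unmarshal(blob, &t); err != nil {
		panic(err)
	}
	if t.Token == "" {
		t.Token = t.AccessToken // fall back to the OAuth2-style field name
	}
	if t.ExpiresIn < 60 {
		t.ExpiresIn = 60 // enforce the minimum token lifetime, as above
	}
	if t.IssuedAt.IsZero() {
		t.IssuedAt = time.Now().UTC()
	}
	expires := t.IssuedAt.Add(time.Duration(t.ExpiresIn) * time.Second)
	fmt.Println(t.Token, expires.After(time.Now()))
}
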
-
-// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on sys and hostPort.
-func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
-	if sys != nil && sys.DockerCertPath != "" {
-		return sys.DockerCertPath, nil
-	}
-	if sys != nil && sys.DockerPerHostCertDirPath != "" {
-		return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil
-	}
-
-	var (
-		hostCertDir     string
-		fullCertDirPath string
-	)
-	for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths {
-		if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
-			hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
-		} else {
-			hostCertDir = systemPerHostCertDirPath
-		}
-
-		fullCertDirPath = filepath.Join(hostCertDir, hostPort)
-		_, err := os.Stat(fullCertDirPath)
-		if err == nil {
-			break
-		}
-		if os.IsNotExist(err) {
-			continue
-		}
-		if os.IsPermission(err) {
-			logrus.Debugf("error accessing certs directory due to permissions: %v", err)
-			continue
-		}
-		if err != nil {
-			return "", err
-		}
-	}
-	return fullCertDirPath, nil
-}
-
-// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
-// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
-func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
-	registry := reference.Domain(ref.ref)
-	username, password, err := config.GetAuthentication(sys, registry)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error getting username and password")
-	}
-	sigBase, err := configuredSignatureStorageBase(sys, ref, write)
-	if err != nil {
-		return nil, err
-	}
-
-	client, err := newDockerClient(sys, registry, ref.ref.Name())
-	if err != nil {
-		return nil, err
-	}
-	client.username = username
-	client.password = password
-	client.signatureBase = sigBase
-	client.scope.actions = actions
-	client.scope.remoteName = reference.Path(ref.ref)
-	return client, nil
-}
-
-// newDockerClient returns a new dockerClient instance for the given registry
-// and reference. The reference is used to query the registry configuration
-// and can either be a registry (e.g., "registry.com[:5000]") or a repository
-// (e.g., "registry.com[:5000][/some/namespace]/repo").
-// Please note that newDockerClient does not set all members of dockerClient
-// (e.g., username and password); those must be set by callers if necessary.
-func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
-	hostName := registry
-	if registry == dockerHostname {
-		registry = dockerRegistry
-	}
-	tlsClientConfig := serverDefault()
-
-	// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
-	// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
-	// dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
-	// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
-	// undocumented and may change if docker/docker changes.
-	certDir, err := dockerCertDir(sys, hostName)
-	if err != nil {
-		return nil, err
-	}
-	if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
-		return nil, err
-	}
-
-	// Check if TLS verification shall be skipped (default=false) which can
-	// be specified in the sysregistriesv2 configuration.
-	skipVerify := false
-	reg, err := sysregistriesv2.FindRegistry(sys, reference)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error loading registries")
-	}
-	if reg != nil {
-		if reg.Blocked {
-			return nil, fmt.Errorf("registry %s is blocked in %s", reg.Prefix, sysregistriesv2.ConfigPath(sys))
-		}
-		skipVerify = reg.Insecure
-	}
-	tlsClientConfig.InsecureSkipVerify = skipVerify
-
-	return &dockerClient{
-		sys:             sys,
-		registry:        registry,
-		tlsClientConfig: tlsClientConfig,
-	}, nil
-}
-
-// CheckAuth validates the credentials by attempting to log into the registry.
-// It returns an error if an error occurred while making the http request, or if the status code received was 401.
-func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
-	client, err := newDockerClient(sys, registry, registry)
-	if err != nil {
-		return errors.Wrapf(err, "error creating new docker client")
-	}
-	client.username = username
-	client.password = password
-
-	resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-
-	switch resp.StatusCode {
-	case http.StatusOK:
-		return nil
-	case http.StatusUnauthorized:
-		return ErrUnauthorizedForCredentials
-	default:
-		return errors.Errorf("error occurred with status code %d (%s)", resp.StatusCode, http.StatusText(resp.StatusCode))
-	}
-}
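
This 401 contract is what the commit message refers to: in containers/image v5, ErrUnauthorizedForCredentials becomes a struct type, so callers such as podman login replace the equality check with a type assertion. A sketch against the v5 API, with a made-up registry name:

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{}
	err := docker.CheckAuth(context.Background(), sys, "user", "secret", "registry.example.com")
	switch {
	case err == nil:
		fmt.Println("credentials accepted")
	default:
		// v5: a type assertion instead of comparing against a sentinel error value.
		if _, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
			fmt.Println("invalid username/password")
		} else {
			fmt.Println("request failed:", err)
		}
	}
}
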
-
-// SearchResult holds the information of each matching image.
-// It matches the output returned by the v1 endpoint.
-type SearchResult struct {
-	Name        string `json:"name"`
-	Description string `json:"description"`
-	// StarCount states the number of stars the image has
-	StarCount int  `json:"star_count"`
-	IsTrusted bool `json:"is_trusted"`
-	// IsAutomated states whether the image is an automated build
-	IsAutomated bool `json:"is_automated"`
-	// IsOfficial states whether the image is an official build
-	IsOfficial bool `json:"is_official"`
-}
-
-// SearchRegistry queries a registry for images that contain "image" in their name.
-// The limit is the max number of results desired.
-// Note: The limit value doesn't work with all registries; for example, registry.access.redhat.com
-// returns all the results without limiting them to the limit value.
-func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
-	type V2Results struct {
-		// Repositories holds the results returned by the /v2/_catalog endpoint
-		Repositories []string `json:"repositories"`
-	}
-	type V1Results struct {
-		// Results holds the results returned by the /v1/search endpoint
-		Results []SearchResult `json:"results"`
-	}
-	v2Res := &V2Results{}
-	v1Res := &V1Results{}
-
-	// Get credentials from authfile for the underlying hostname
-	username, password, err := config.GetAuthentication(sys, registry)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error getting username and password")
-	}
-
-	// The /v2/_catalog endpoint has been disabled for docker.io, so a call made to
-	// that endpoint will fail. We therefore use the v1 hostname for docker.io, for
-	// simplicity of implementation and because it returns search results.
-	hostname := registry
-	if registry == dockerHostname {
-		hostname = dockerV1Hostname
-	}
-
-	client, err := newDockerClient(sys, hostname, registry)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error creating new docker client")
-	}
-	client.username = username
-	client.password = password
-
-	// Only try the v1 search endpoint if the search query is not empty. If it is
-	// empty, skip to the v2 endpoint.
-	if image != "" {
-		// set up the query values for the v1 endpoint
-		u := url.URL{
-			Path: "/v1/search",
-		}
-		q := u.Query()
-		q.Set("q", image)
-		q.Set("n", strconv.Itoa(limit))
-		u.RawQuery = q.Encode()
-
-		logrus.Debugf("trying to talk to v1 search endpoint")
-		resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil)
-		if err != nil {
-			logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
-		} else {
-			defer resp.Body.Close()
-			if resp.StatusCode != http.StatusOK {
-				logrus.Debugf("error getting search results from v1 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode))
-			} else {
-				if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
-					return nil, err
-				}
-				return v1Res.Results, nil
-			}
-		}
-	}
-
-	logrus.Debugf("trying to talk to v2 search endpoint")
-	resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth, nil)
-	if err != nil {
-		logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
-	} else {
-		defer resp.Body.Close()
-		if resp.StatusCode != http.StatusOK {
-			logrus.Errorf("error getting search results from v2 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode))
-		} else {
-			if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
-				return nil, err
-			}
-			searchRes := []SearchResult{}
-			for _, repo := range v2Res.Repositories {
-				if strings.Contains(repo, image) {
-					res := SearchResult{
-						Name: repo,
-					}
-					searchRes = append(searchRes, res)
-				}
-			}
-			return searchRes, nil
-		}
-	}
-
-	return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
-}
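
A hedged usage sketch for SearchRegistry, again assuming the v5 import path; the registry and search term are examples:

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	ctx := context.Background()
	sys := &types.SystemContext{}
	// For docker.io this goes through the v1 search endpoint, as described above.
	results, err := docker.SearchRegistry(ctx, sys, "docker.io", "alpine", 10)
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		fmt.Printf("%s (official: %v): %s\n", r.Name, r.IsOfficial, r.Description)
	}
}
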
-
-// makeRequest creates and executes an http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
-// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
-func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) {
-	if err := c.detectProperties(ctx); err != nil {
-		return nil, err
-	}
-
-	url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
-	return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
-}
-
-// makeRequestToResolvedURL creates and executes an http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
-// streamLen, if not -1, specifies the length of the data expected on stream.
-// makeRequest should generally be preferred.
-// TODO(runcom): too many arguments here, use a struct
-func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
-	req, err := http.NewRequest(method, url, stream)
-	if err != nil {
-		return nil, err
-	}
-	req = req.WithContext(ctx)
-	if streamLen != -1 { // Do not blindly overwrite if streamLen == -1; http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
-		req.ContentLength = streamLen
-	}
-	req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
-	for n, h := range headers {
-		for _, hh := range h {
-			req.Header.Add(n, hh)
-		}
-	}
-	if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
-		req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
-	}
-	if auth == v2Auth {
-		if err := c.setupRequestAuth(req, extraScope); err != nil {
-			return nil, err
-		}
-	}
-	logrus.Debugf("%s %s", method, url)
-	res, err := c.client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	return res, nil
-}
-
-// we're using the challenges from the /v2/ ping response and not the ones from the destination
-// URL in this request because:
-//
-// 1) docker does that as well
-// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
-//
-// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follow-ups
-func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error {
-	if len(c.challenges) == 0 {
-		return nil
-	}
-	schemeNames := make([]string, 0, len(c.challenges))
-	for _, challenge := range c.challenges {
-		schemeNames = append(schemeNames, challenge.Scheme)
-		switch challenge.Scheme {
-		case "basic":
-			req.SetBasicAuth(c.username, c.password)
-			return nil
-		case "bearer":
-			cacheKey := ""
-			scopes := []authScope{c.scope}
-			if extraScope != nil {
-				// Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
-				cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
-				scopes = append(scopes, *extraScope)
-			}
-			var token bearerToken
-			t, inCache := c.tokenCache.Load(cacheKey)
-			if inCache {
-				token = t.(bearerToken)
-			}
-			if !inCache || time.Now().After(token.expirationTime) {
-				t, err := c.getBearerToken(req.Context(), challenge, scopes)
-				if err != nil {
-					return err
-				}
-				token = *t
-				c.tokenCache.Store(cacheKey, token)
-			}
-			req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token))
-			return nil
-		default:
-			logrus.Debugf("no handler for %s authentication", challenge.Scheme)
-		}
-	}
-	logrus.Infof("None of the challenges sent by the server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", "))
-	return nil
-}
-
-func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) {
-	realm, ok := challenge.Parameters["realm"]
-	if !ok {
-		return nil, errors.Errorf("missing realm in bearer auth challenge")
-	}
-
-	authReq, err := http.NewRequest("GET", realm, nil)
-	if err != nil {
-		return nil, err
-	}
-	authReq = authReq.WithContext(ctx)
-	getParams := authReq.URL.Query()
-	if c.username != "" {
-		getParams.Add("account", c.username)
-	}
-	if service, ok := challenge.Parameters["service"]; ok && service != "" {
-		getParams.Add("service", service)
-	}
-	for _, scope := range scopes {
-		if scope.remoteName != "" && scope.actions != "" {
-			getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
-		}
-	}
-	authReq.URL.RawQuery = getParams.Encode()
-	if c.username != "" && c.password != "" {
-		authReq.SetBasicAuth(c.username, c.password)
-	}
-	logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
-	res, err := c.client.Do(authReq)
-	if err != nil {
-		return nil, err
-	}
-	defer res.Body.Close()
-	switch res.StatusCode {
-	case http.StatusUnauthorized:
-		err := client.HandleErrorResponse(res)
-		logrus.Debugf("Server response when trying to obtain an access token: \n%q", err.Error())
-		return nil, ErrUnauthorizedForCredentials
-	case http.StatusOK:
-		break
-	default:
-		return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL)
-	}
-	tokenBlob, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, err
-	}
-
-	return newBearerTokenFromJSONBlob(tokenBlob)
-}
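
getBearerToken above assembles the token-request URL from the challenge parameters. A condensed sketch using only the standard library; the realm, service, and scope values are hypothetical:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Add("account", "user")                          // only when a username is set
	q.Add("service", "registry.example.com")          // from the challenge's "service" parameter
	q.Add("scope", "repository:library/busybox:pull") // repository:<name>:<actions>

	u, err := url.Parse("https://auth.example.com/token") // the challenge's "realm"
	if err != nil {
		panic(err)
	}
	u.RawQuery = q.Encode()
	// GET this URL (with HTTP basic auth if a password is set) and decode the
	// JSON response as in newBearerTokenFromJSONBlob.
	fmt.Println(u.String())
}
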
-
-// detectPropertiesHelper performs the work of detectProperties, which executes
-// it at most once.
-func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
-	// We overwrite the TLS client's `InsecureSkipVerify` only if explicitly
-	// specified by the system context
-	if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
-		c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
-	}
-	tr := tlsclientconfig.NewTransport()
-	tr.TLSClientConfig = c.tlsClientConfig
-	c.client = &http.Client{Transport: tr}
-
-	ping := func(scheme string) error {
-		url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
-		resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
-		if err != nil {
-			logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
-			return err
-		}
-		defer resp.Body.Close()
-		logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
-		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
-			return errors.Errorf("error pinging registry %s, response code %d (%s)", c.registry, resp.StatusCode, http.StatusText(resp.StatusCode))
-		}
-		c.challenges = parseAuthHeader(resp.Header)
-		c.scheme = scheme
-		c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
-		return nil
-	}
-	err := ping("https")
-	if err != nil && c.tlsClientConfig.InsecureSkipVerify {
-		err = ping("http")
-	}
-	if err != nil {
-		err = errors.Wrap(err, "pinging docker registry returned")
-		if c.sys != nil && c.sys.DockerDisableV1Ping {
-			return err
-		}
-		// best effort to understand if we're talking to a V1 registry
-		pingV1 := func(scheme string) bool {
-			url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
-			resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
-			if err != nil {
-				logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
-				return false
-			}
-			defer resp.Body.Close()
-			logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
-			if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
-				return false
-			}
-			return true
-		}
-		isV1 := pingV1("https")
-		if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
-			isV1 = pingV1("http")
-		}
-		if isV1 {
-			err = ErrV1NotSupported
-		}
-	}
-	return err
-}
-
-// detectProperties detects various properties of the registry.
-// See the dockerClient documentation for members which are affected by this.
-func (c *dockerClient) detectProperties(ctx context.Context) error {
-	c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) })
-	return c.detectPropertiesError
-}
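
The probing order in detectPropertiesHelper can be condensed to a schematic; this is a sketch of the control flow, not the actual implementation:

package main

import "fmt"

// probe mirrors the order used above: try the v2 endpoint over https, fall
// back to http only when TLS verification is disabled, and only then (unless
// disabled) ping the v1 endpoint to produce a more specific error.
func probe(pingV2 func(scheme string) error, pingV1 func(scheme string) bool, insecure, disableV1 bool) error {
	err := pingV2("https")
	if err != nil && insecure {
		err = pingV2("http")
	}
	if err == nil || disableV1 {
		return err
	}
	isV1 := pingV1("https")
	if !isV1 && insecure {
		isV1 = pingV1("http")
	}
	if isV1 {
		return fmt.Errorf("can't talk to a V1 docker registry")
	}
	return err
}

func main() {
	err := probe(
		func(string) error { return fmt.Errorf("v2 unreachable") },
		func(string) bool { return true },
		false, false)
	fmt.Println(err)
}
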
-
-// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
-// using the original data structures.
-func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
-	path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
-	res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
-	if err != nil {
-		return nil, err
-	}
-	defer res.Body.Close()
-	if res.StatusCode != http.StatusOK {
-		return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
-	}
-	body, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, err
-	}
-
-	var parsedBody extensionSignatureList
-	if err := json.Unmarshal(body, &parsedBody); err != nil {
-		return nil, errors.Wrapf(err, "Error decoding signature list")
-	}
-	return &parsedBody, nil
-}
diff --git a/vendor/github.com/containers/image/v4/docker/docker_image.go b/vendor/github.com/containers/image/v4/docker/docker_image.go
deleted file mode 100644
index 4332dc020..000000000
--- a/vendor/github.com/containers/image/v4/docker/docker_image.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package docker
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-
-	"github.com/containers/image/v4/docker/reference"
-	"github.com/containers/image/v4/image"
-	"github.com/containers/image/v4/types"
-	"github.com/pkg/errors"
-)
-
-// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
-// which are specific to Docker.
-type Image struct {
-	types.ImageCloser
-	src *dockerImageSource
-}
-
-// newImage returns a new Image interface type after setting up
-// a client to the registry hosting the given image.
-// The caller must call .Close() on the returned Image.
-func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
-	s, err := newImageSource(ctx, sys, ref)
-	if err != nil {
-		return nil, err
-	}
-	img, err := image.FromSource(ctx, sys, s)
-	if err != nil {
-		return nil, err
-	}
-	return &Image{ImageCloser: img, src: s}, nil
-}
-
-// SourceRefFullName returns a fully expanded name for the repository this image is in.
-func (i *Image) SourceRefFullName() string {
-	return i.src.ref.ref.Name()
-}
-
-// GetRepositoryTags lists all tags available in the repository. The tag
-// provided inside the ImageReference will be ignored. (This is a
-// backward-compatible shim method which calls the module-level
-// GetRepositoryTags)
-func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
-	return GetRepositoryTags(ctx, i.src.c.sys, i.src.ref)
-}
-
-// GetRepositoryTags lists all tags available in the repository. The tag
-// provided inside the ImageReference will be ignored.
-func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) { - dr, ok := ref.(dockerReference) - if !ok { - return nil, errors.Errorf("ref must be a dockerReference") - } - - path := fmt.Sprintf(tagsPath, reference.Path(dr.ref)) - client, err := newDockerClientFromRef(sys, dr, false, "pull") - if err != nil { - return nil, errors.Wrap(err, "failed to create client") - } - - tags := make([]string, 0) - - for { - res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - // print url also - return nil, errors.Errorf("Invalid status code returned when fetching tags list %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) - } - - var tagsHolder struct { - Tags []string - } - if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil { - return nil, err - } - tags = append(tags, tagsHolder.Tags...) - - link := res.Header.Get("Link") - if link == "" { - break - } - - linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") - linkURL, err := url.Parse(linkURLStr) - if err != nil { - return tags, err - } - - // can be relative or absolute, but we only want the path (and I - // guess we're in trouble if it forwards to a new place...) - path = linkURL.Path - if linkURL.RawQuery != "" { - path += "?" - path += linkURL.RawQuery - } - } - return tags, nil -} diff --git a/vendor/github.com/containers/image/v4/docker/docker_image_dest.go b/vendor/github.com/containers/image/v4/docker/docker_image_dest.go deleted file mode 100644 index 0f351ab59..000000000 --- a/vendor/github.com/containers/image/v4/docker/docker_image_dest.go +++ /dev/null @@ -1,611 +0,0 @@ -package docker - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/pkg/blobinfocache/none" - "github.com/containers/image/v4/types" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type dockerImageDestination struct { - ref dockerReference - c *dockerClient - // State - manifestDigest digest.Digest // or "" if not yet known. -} - -// newImageDestination creates a new ImageDestination for the specified image reference. -func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) { - c, err := newDockerClientFromRef(sys, ref, true, "pull,push") - if err != nil { - return nil, err - } - return &dockerImageDestination{ - ref: ref, - c: c, - }, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *dockerImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. 
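
The tag-pagination loop above follows the registry's Link header. A small standalone helper showing the same parsing; the header value is an example:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// nextPath extracts the path (plus query) of the next page from a registry
// Link header, mirroring the loop above.
func nextPath(link string) (string, error) {
	raw := strings.Trim(strings.Split(link, ";")[0], "<>")
	u, err := url.Parse(raw)
	if err != nil {
		return "", err
	}
	path := u.Path
	if u.RawQuery != "" {
		path += "?" + u.RawQuery
	}
	return path, nil
}

func main() {
	p, err := nextPath(`</v2/library/busybox/tags/list?last=3.9&n=50>; rel="next"`)
	fmt.Println(p, err) // /v2/library/busybox/tags/list?last=3.9&n=50 <nil>
}
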
-func (d *dockerImageDestination) Close() error { - return nil -} - -func (d *dockerImageDestination) SupportedManifestMIMETypes() []string { - return []string{ - imgspecv1.MediaTypeImageManifest, - manifest.DockerV2Schema2MediaType, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error { - if err := d.c.detectProperties(ctx); err != nil { - return err - } - switch { - case d.c.signatureBase != nil: - return nil - case d.c.supportsSignatures: - return nil - default: - return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured") - } -} - -func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression { - return types.Compress -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { - return true -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *dockerImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool { - return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. -} - -// sizeCounter is an io.Writer which only counts the total size of its input. -type sizeCounter struct{ size int64 } - -func (c *sizeCounter) Write(p []byte) (n int, err error) { - c.size += int64(len(p)) - return len(p), nil -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *dockerImageDestination) HasThreadSafePutBlob() bool { - return true -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// May update cache. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - if inputInfo.Digest.String() != "" { - // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. - // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. 
- // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_. - haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false) - if err != nil { - return types.BlobInfo{}, err - } - if haveBlob { - return reusedInfo, nil - } - } - - // FIXME? Chunked upload, progress reporting, etc. - uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)) - logrus.Debugf("Uploading %s", uploadPath) - res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil) - if err != nil { - return types.BlobInfo{}, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusAccepted { - logrus.Debugf("Error initiating layer upload, response %#v", *res) - return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry) - } - uploadLocation, err := res.Location() - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") - } - - digester := digest.Canonical.Digester() - sizeCounter := &sizeCounter{} - tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)) - res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil) - if err != nil { - logrus.Debugf("Error uploading layer chunked, response %#v", res) - return types.BlobInfo{}, err - } - defer res.Body.Close() - computedDigest := digester.Digest() - - uploadLocation, err = res.Location() - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") - } - - // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope) - - locationQuery := uploadLocation.Query() - // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717 - locationQuery.Set("digest", computedDigest.String()) - uploadLocation.RawQuery = locationQuery.Encode() - res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) - if err != nil { - return types.BlobInfo{}, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusCreated { - logrus.Debugf("Error uploading layer, response %#v", *res) - return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation) - } - - logrus.Debugf("Upload of layer %s complete", computedDigest) - cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref)) - return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil -} - -// blobExists returns true iff repo contains a blob with digest, and if so, also its size. -// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. 
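
The digest-and-size accounting in PutBlob above (a digester and a sizeCounter fed through io.TeeReader) also works standalone; go-digest is the same library the deleted code imports:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/opencontainers/go-digest"
)

// sizeCounter mirrors the helper above: it only counts the bytes written to it.
type sizeCounter struct{ size int64 }

func (c *sizeCounter) Write(p []byte) (int, error) {
	c.size += int64(len(p))
	return len(p), nil
}

func main() {
	stream := strings.NewReader("layer bytes")
	digester := digest.Canonical.Digester()
	counter := &sizeCounter{}
	// Everything the uploader reads from tee is also hashed and counted.
	tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), counter))
	if _, err := io.Copy(ioutil.Discard, tee); err != nil { // stands in for the PATCH upload
		panic(err)
	}
	fmt.Println(digester.Digest(), counter.size)
}
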
-func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
-	checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
-	logrus.Debugf("Checking %s", checkPath)
-	res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
-	if err != nil {
-		return false, -1, err
-	}
-	defer res.Body.Close()
-	switch res.StatusCode {
-	case http.StatusOK:
-		logrus.Debugf("... already exists")
-		return true, getBlobSize(res), nil
-	case http.StatusUnauthorized:
-		logrus.Debugf("... not authorized")
-		return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
-	case http.StatusNotFound:
-		logrus.Debugf("... not present")
-		return false, -1, nil
-	default:
-		return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
-	}
-}
-
-// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
-func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
-	u := url.URL{
-		Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
-		RawQuery: url.Values{
-			"mount": {srcDigest.String()},
-			"from":  {reference.Path(srcRepo)},
-		}.Encode(),
-	}
-	mountPath := u.String()
-	logrus.Debugf("Trying to mount %s", mountPath)
-	res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
-	if err != nil {
-		return err
-	}
-	defer res.Body.Close()
-	switch res.StatusCode {
-	case http.StatusCreated:
-		logrus.Debugf("... mount OK")
-		return nil
-	case http.StatusAccepted:
-		// Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
-		// Abort, and let the ultimate caller do an upload when it's ready, instead.
-		// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
-		uploadLocation, err := res.Location()
-		if err != nil {
-			return errors.Wrap(err, "Error determining upload URL after a mount attempt")
-		}
-		logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
-		res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
-		if err != nil {
-			logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
-		} else {
-			defer res2.Body.Close()
-			if res2.StatusCode != http.StatusNoContent {
-				logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode))
-			}
-		}
-		// Anyway, if canceling the upload fails, ignore it and return the more important error:
-		return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
-	default:
-		logrus.Debugf("Error mounting, response %#v", *res)
-		return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
-	}
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
-	if info.Digest == "" {
-		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
-	}
-
-	// First, check whether the blob happens to already exist at the destination.
-	exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
-	if err != nil {
-		return false, types.BlobInfo{}, err
-	}
-	if exists {
-		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
-		return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
-	}
-
-	// Then try reusing blobs from other locations.
-	for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
-		candidateRepo, err := parseBICLocationReference(candidate.Location)
-		if err != nil {
-			logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
-			continue
-		}
-		logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
-
-		// Sanity checks:
-		if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
-			logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
-			continue
-		}
-		if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
-			logrus.Debug("... Already tried the primary destination")
-			continue
-		}
-
-		// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
-
-		// Checking candidateRepo, and mounting from it, requires an
-		// expanded token scope.
-		extraScope := &authScope{
-			remoteName: reference.Path(candidateRepo),
-			actions:    "pull",
-		}
-		// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
-		// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
-		// So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
-		// On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
-		// Even worse, docker/distribution does not actually reasonably implement canceling uploads
-		// (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
-		// so, be a nice client and don't create unnecessary upload sessions on the server.
-		exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
-		if err != nil {
-			logrus.Debugf("... Failed: %v", err)
-			continue
-		}
-		if !exists {
-			// FIXME? Should we drop the blob from cache here (and elsewhere?)?
-			continue // logrus.Debug() already happened in blobExists
-		}
-		if candidateRepo.Name() != d.ref.ref.Name() {
-			if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil {
-				logrus.Debugf("... Mount failed: %v", err)
-				continue
-			}
-		}
-		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
-		return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
-	}
-
-	return false, types.BlobInfo{}, nil
-}
-
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema),
-// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) error {
-	digest, err := manifest.Digest(m)
-	if err != nil {
-		return err
-	}
-	d.manifestDigest = digest
-
-	refTail, err := d.ref.tagOrDigest()
-	if err != nil {
-		return err
-	}
-	path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
-
-	headers := map[string][]string{}
-	mimeType := manifest.GuessMIMEType(m)
-	if mimeType != "" {
-		headers["Content-Type"] = []string{mimeType}
-	}
-	res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
-	if err != nil {
-		return err
-	}
-	defer res.Body.Close()
-	if !successStatus(res.StatusCode) {
-		err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
-		if isManifestInvalidError(errors.Cause(err)) {
-			err = types.ManifestTypeRejectedError{Err: err}
-		}
-		return err
-	}
-	return nil
-}
-
-// successStatus returns true if the argument is a successful HTTP response
-// code (in the range 200 - 399 inclusive).
-func successStatus(status int) bool {
-	return status >= 200 && status <= 399
-}
-
-// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
-func isManifestInvalidError(err error) bool {
-	errors, ok := err.(errcode.Errors)
-	if !ok || len(errors) == 0 {
-		return false
-	}
-	err = errors[0]
-	ec, ok := err.(errcode.ErrorCoder)
-	if !ok {
-		return false
-	}
-
-	switch ec.ErrorCode() {
-	// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
-	case v2.ErrorCodeManifestInvalid:
-		return true
-	// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
-	// when uploading to a tag (because it can’t find a matching tag inside the manifest)
-	case v2.ErrorCodeTagInvalid:
-		return true
-	// ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when
-	// uploading an OCI manifest that is (correctly, according to the spec) missing
-	// a top-level media type.
See libpod issue #1719 - // FIXME: remove this case when ECR behavior is fixed - case errcode.ErrorCodeUnsupported: - return strings.Contains(err.Error(), "Invalid JSON syntax") - default: - return false - } -} - -func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { - // Do not fail if we don’t really need to support signatures. - if len(signatures) == 0 { - return nil - } - if err := d.c.detectProperties(ctx); err != nil { - return err - } - switch { - case d.c.signatureBase != nil: - return d.putSignaturesToLookaside(signatures) - case d.c.supportsSignatures: - return d.putSignaturesToAPIExtension(ctx, signatures) - default: - return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured") - } -} - -// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase, -// which is not nil. -func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error { - // FIXME? This overwrites files one at a time, definitely not atomic. - // A failure when updating signatures with a reordered copy could lose some of them. - - // Skip dealing with the manifest digest if not necessary. - if len(signatures) == 0 { - return nil - } - - if d.manifestDigest.String() == "" { - // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures - return errors.Errorf("Unknown manifest digest, can't add signatures") - } - - // NOTE: Keep this in sync with docs/signature-protocols.md! - for i, signature := range signatures { - url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) - if url == nil { - return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - err := d.putOneSignature(url, signature) - if err != nil { - return err - } - } - // Remove any other signatures, if present. - // We stop at the first missing signature; if a previous deleting loop aborted - // prematurely, this may not clean up all of them, but one missing signature - // is enough for dockerImageSource to stop looking for other signatures, so that - // is sufficient. - for i := len(signatures); ; i++ { - url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) - if url == nil { - return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - missing, err := d.c.deleteOneSignature(url) - if err != nil { - return err - } - if missing { - break - } - } - - return nil -} - -// putOneSignature stores one signature to url. -// NOTE: Keep this in sync with docs/signature-protocols.md! -func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error { - switch url.Scheme { - case "file": - logrus.Debugf("Writing to %s", url.Path) - err := os.MkdirAll(filepath.Dir(url.Path), 0755) - if err != nil { - return err - } - err = ioutil.WriteFile(url.Path, signature, 0644) - if err != nil { - return err - } - return nil - - case "http", "https": - return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) - default: - return errors.Errorf("Unsupported scheme when writing signature to %s", url.String()) - } -} - -// deleteOneSignature deletes a signature from url, if it exists. -// If it successfully determines that the signature does not exist, returns (true, nil) -// NOTE: Keep this in sync with docs/signature-protocols.md! 
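For concreteness, the lookaside layout described above can be sketched outside this file. The following standalone snippet (an illustration, not part of the vendored tree) reproduces the documented naming scheme implemented by the signatureStorageURL helper later in this patch, using only the go-digest API:

package main

import (
	"fmt"
	"net/url"

	digest "github.com/opencontainers/go-digest"
)

// sigURL mirrors the layout documented in docs/signature-protocols.md:
// <base>@<algorithm>=<hex>/signature-<index+1>
func sigURL(base *url.URL, d digest.Digest, index int) *url.URL {
	u := *base // shallow copy, same approach as signatureStorageURL below
	u.Path = fmt.Sprintf("%s@%s=%s/signature-%d", u.Path, d.Algorithm(), d.Hex(), index+1)
	return &u
}

func main() {
	base, _ := url.Parse("file:///var/lib/containers/sigstore/library/busybox")
	d := digest.Digest("sha256:e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9")
	// Signatures are numbered from 1; readers stop at the first missing index.
	fmt.Println(sigURL(base, d, 0).Path)
}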
-func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
-	switch url.Scheme {
-	case "file":
-		logrus.Debugf("Deleting %s", url.Path)
-		err := os.Remove(url.Path)
-		if err != nil && os.IsNotExist(err) {
-			return true, nil
-		}
-		return false, err
-
-	case "http", "https":
-		return false, errors.Errorf("Deleting directly from a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
-	default:
-		return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
-	}
-}
-
-// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
-func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte) error {
-	// Skip dealing with the manifest digest, or reading the old state, if not necessary.
-	if len(signatures) == 0 {
-		return nil
-	}
-
-	if d.manifestDigest.String() == "" {
-		// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
-		return errors.Errorf("Unknown manifest digest, can't add signatures")
-	}
-
-	// Because image signatures are a shared resource in Atomic Registry, the default upload
-	// always adds signatures. Eventually we should also allow removing signatures,
-	// but the X-Registry-Supports-Signatures API extension does not support that yet.
-
-	existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, d.manifestDigest)
-	if err != nil {
-		return err
-	}
-	existingSigNames := map[string]struct{}{}
-	for _, sig := range existingSignatures.Signatures {
-		existingSigNames[sig.Name] = struct{}{}
-	}
-
-sigExists:
-	for _, newSig := range signatures {
-		for _, existingSig := range existingSignatures.Signatures {
-			if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
-				continue sigExists
-			}
-		}
-
-		// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
-		var signatureName string
-		for {
-			randBytes := make([]byte, 16)
-			n, err := rand.Read(randBytes)
-			if err != nil || n != 16 {
-				return errors.Wrapf(err, "Error generating random signature len %d", n)
-			}
-			signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
-			if _, ok := existingSigNames[signatureName]; !ok {
-				break
-			}
-		}
-		sig := extensionSignature{
-			Version: extensionSignatureSchemaVersion,
-			Name:    signatureName,
-			Type:    extensionSignatureTypeAtomic,
-			Content: newSig,
-		}
-		body, err := json.Marshal(sig)
-		if err != nil {
-			return err
-		}
-
-		path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
-		res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
-		if err != nil {
-			return err
-		}
-		defer res.Body.Close()
-		if res.StatusCode != http.StatusCreated {
-			body, err := ioutil.ReadAll(res.Body)
-			if err == nil {
-				logrus.Debugf("Error body %s", string(body))
-			}
-			logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
-			return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
-		}
-	}
-
-	return nil
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *dockerImageDestination) Commit(ctx context.Context) error {
-	return nil
-}
diff --git a/vendor/github.com/containers/image/v4/docker/docker_image_src.go b/vendor/github.com/containers/image/v4/docker/docker_image_src.go
deleted file mode 100644
index 353b1a6c5..000000000
--- a/vendor/github.com/containers/image/v4/docker/docker_image_src.go
+++ /dev/null
@@ -1,451 +0,0 @@
-package docker
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"mime"
-	"net/http"
-	"net/url"
-	"os"
-	"strconv"
-
-	"github.com/containers/image/v4/docker/reference"
-	"github.com/containers/image/v4/manifest"
-	"github.com/containers/image/v4/pkg/sysregistriesv2"
-	"github.com/containers/image/v4/types"
-	"github.com/docker/distribution/registry/client"
-	digest "github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-type dockerImageSource struct {
-	ref dockerReference
-	c   *dockerClient
-	// State
-	cachedManifest         []byte // nil if not loaded yet
-	cachedManifestMIMEType string // Only valid if cachedManifest != nil
-}
-
-// newImageSource creates a new ImageSource for the specified image reference.
-// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
-	registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
-	if err != nil {
-		return nil, errors.Wrapf(err, "error loading registries configuration")
-	}
-	if registry == nil {
-		// No configuration was found for the provided reference, so use the
-		// equivalent of a default configuration.
-		registry = &sysregistriesv2.Registry{
-			Endpoint: sysregistriesv2.Endpoint{
-				Location: ref.ref.String(),
-			},
-			Prefix: ref.ref.String(),
-		}
-	}
-
-	primaryDomain := reference.Domain(ref.ref)
-	// Check all endpoints for the manifest availability. If we find one that does
-	// contain the image, it will be used for all future pull actions. Always try the
-	// non-mirror original location last; this both transparently handles the case
-	// of no mirrors configured, and ensures we return the error encountered when
-	// accessing the upstream location if all endpoints fail.
-	manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint")
-	pullSources, err := registry.PullSourcesFromReference(ref.ref)
-	if err != nil {
-		return nil, err
-	}
-	for _, pullSource := range pullSources {
-		logrus.Debugf("Trying to pull %q", pullSource.Reference)
-		dockerRef, err := newReference(pullSource.Reference)
-		if err != nil {
-			return nil, err
-		}
-
-		endpointSys := sys
-		// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
-		if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(dockerRef.ref) != primaryDomain {
-			copy := *endpointSys
-			copy.DockerAuthConfig = nil
-			endpointSys = &copy
-		}
-
-		client, err := newDockerClientFromRef(endpointSys, dockerRef, false, "pull")
-		if err != nil {
-			return nil, err
-		}
-		client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
-
-		testImageSource := &dockerImageSource{
-			ref: dockerRef,
-			c:   client,
-		}
-
-		manifestLoadErr = testImageSource.ensureManifestIsLoaded(ctx)
-		if manifestLoadErr == nil {
-			return testImageSource, nil
-		}
-	}
-	return nil, manifestLoadErr
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (s *dockerImageSource) Reference() types.ImageReference {
-	return s.ref
-}
-
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *dockerImageSource) Close() error {
-	return nil
-}
-
-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *dockerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
-	return nil, nil
-}
-
-// simplifyContentType drops parameters from an HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
-// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
-func simplifyContentType(contentType string) string {
-	if contentType == "" {
-		return contentType
-	}
-	mimeType, _, err := mime.ParseMediaType(contentType)
-	if err != nil {
-		return ""
-	}
-	return mimeType
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
-// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
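As a usage sketch (hypothetical caller code, not part of this diff): the v4 API deleted here is enough to fetch a primary manifest, assuming network access to the registry.

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v4/docker"
	"github.com/containers/image/v4/types"
)

func main() {
	ctx := context.Background()
	ref, err := docker.ParseReference("//docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	src, err := ref.NewImageSource(ctx, &types.SystemContext{})
	if err != nil {
		panic(err)
	}
	defer src.Close()
	// A nil instanceDigest selects the primary manifest (see the comment above).
	manblob, mimeType, err := src.GetManifest(ctx, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s, %d bytes\n", mimeType, len(manblob))
}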
-func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return s.fetchManifest(ctx, instanceDigest.String()) - } - err := s.ensureManifestIsLoaded(ctx) - if err != nil { - return nil, "", err - } - return s.cachedManifest, s.cachedManifestMIMEType, nil -} - -func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { - path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest) - headers := map[string][]string{ - "Accept": manifest.DefaultRequestedManifestMIMETypes, - } - res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil) - if err != nil { - return nil, "", err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name()) - } - manblob, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, "", err - } - return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil -} - -// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType -// -// ImageSource implementations are not required or expected to do any caching, -// but because our signatures are “attached” to the manifest digest, -// we need to ensure that the digest of the manifest returned by GetManifest(ctx, nil) -// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious -// signature verification failures when pulling while a tag is being updated. -func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { - if s.cachedManifest != nil { - return nil - } - - reference, err := s.ref.tagOrDigest() - if err != nil { - return err - } - - manblob, mt, err := s.fetchManifest(ctx, reference) - if err != nil { - return err - } - // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors. - s.cachedManifest = manblob - s.cachedManifestMIMEType = mt - return nil -} - -func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { - var ( - resp *http.Response - err error - ) - for _, url := range urls { - resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) - if err == nil { - if resp.StatusCode != http.StatusOK { - err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode)) - logrus.Debug(err) - continue - } - break - } - } - if err != nil { - return nil, 0, err - } - return resp.Body, getBlobSize(resp), nil -} - -func getBlobSize(resp *http.Response) int64 { - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 - } - return size -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *dockerImageSource) HasThreadSafeGetBlob() bool { - return true -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
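A hypothetical caller, continuing the sketch above; none.NoCache is assumed to be the no-op BlobInfoCache from pkg/blobinfocache/none elsewhere in this tree (that file is not part of this diff).

package example

import (
	"context"
	"io"
	"io/ioutil"

	"github.com/containers/image/v4/pkg/blobinfocache/none"
	"github.com/containers/image/v4/types"
)

// fetchBlob drains one blob from an already-open ImageSource and returns the
// reported size, which may be -1 when the registry sends no Content-Length.
func fetchBlob(ctx context.Context, src types.ImageSource, info types.BlobInfo) (int64, error) {
	rc, size, err := src.GetBlob(ctx, info, none.NoCache)
	if err != nil {
		return 0, err
	}
	defer rc.Close()
	_, err = io.Copy(ioutil.Discard, rc) // a real caller would verify info.Digest while reading
	return size, err
}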
-func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - if len(info.URLs) != 0 { - return s.getExternalBlob(ctx, info.URLs) - } - - path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String()) - logrus.Debugf("Downloading %s", path) - res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) - if err != nil { - return nil, 0, err - } - if res.StatusCode != http.StatusOK { - // print url also - return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) - } - cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref)) - return res.Body, getBlobSize(res), nil -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if err := s.c.detectProperties(ctx); err != nil { - return nil, err - } - switch { - case s.c.signatureBase != nil: - return s.getSignaturesFromLookaside(ctx, instanceDigest) - case s.c.supportsSignatures: - return s.getSignaturesFromAPIExtension(ctx, instanceDigest) - default: - return [][]byte{}, nil - } -} - -// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, -// or finally, from a fetched manifest. -func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { - if instanceDigest != nil { - return *instanceDigest, nil - } - if digested, ok := s.ref.ref.(reference.Digested); ok { - d := digested.Digest() - if d.Algorithm() == digest.Canonical { - return d, nil - } - } - if err := s.ensureManifestIsLoaded(ctx); err != nil { - return "", err - } - return manifest.Digest(s.cachedManifest) -} - -// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase, -// which is not nil. -func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx, instanceDigest) - if err != nil { - return nil, err - } - - // NOTE: Keep this in sync with docs/signature-protocols.md! - signatures := [][]byte{} - for i := 0; ; i++ { - url := signatureStorageURL(s.c.signatureBase, manifestDigest, i) - if url == nil { - return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - signature, missing, err := s.getOneSignature(ctx, url) - if err != nil { - return nil, err - } - if missing { - break - } - signatures = append(signatures, signature) - } - return signatures, nil -} - -// getOneSignature downloads one signature from url. -// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil. -// NOTE: Keep this in sync with docs/signature-protocols.md! 
-func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) {
-	switch url.Scheme {
-	case "file":
-		logrus.Debugf("Reading %s", url.Path)
-		sig, err := ioutil.ReadFile(url.Path)
-		if err != nil {
-			if os.IsNotExist(err) {
-				return nil, true, nil
-			}
-			return nil, false, err
-		}
-		return sig, false, nil
-
-	case "http", "https":
-		logrus.Debugf("GET %s", url)
-		req, err := http.NewRequest("GET", url.String(), nil)
-		if err != nil {
-			return nil, false, err
-		}
-		req = req.WithContext(ctx)
-		res, err := s.c.client.Do(req)
-		if err != nil {
-			return nil, false, err
-		}
-		defer res.Body.Close()
-		if res.StatusCode == http.StatusNotFound {
-			return nil, true, nil
-		} else if res.StatusCode != http.StatusOK {
-			return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode))
-		}
-		sig, err := ioutil.ReadAll(res.Body)
-		if err != nil {
-			return nil, false, err
-		}
-		return sig, false, nil
-
-	default:
-		return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String())
-	}
-}
-
-// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension.
-func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
-	manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
-	if err != nil {
-		return nil, err
-	}
-
-	parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest)
-	if err != nil {
-		return nil, err
-	}
-
-	var sigs [][]byte
-	for _, sig := range parsedBody.Signatures {
-		if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
-			sigs = append(sigs, sig.Content)
-		}
-	}
-	return sigs, nil
-}
-
-// deleteImage deletes the named image from the registry, if supported.
-func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
-	// docker/distribution does not document what action should be used for deleting images.
-	//
-	// Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it.
-	// quay.io requires "push" (an explicit "pull" is unnecessary), and does not grant any token (fails parsing the request) if "delete" is included.
-	// OpenShift ignores the action string (whether passed as the password or as the token, the value is an OpenShift API token identifying a user).
-	//
-	// We have to hard-code a single string; luckily both docker/distribution and quay.io support "*" to mean "everything".
-	c, err := newDockerClientFromRef(sys, ref, true, "*")
-	if err != nil {
-		return err
-	}
-
-	headers := map[string][]string{
-		"Accept": manifest.DefaultRequestedManifestMIMETypes,
-	}
-	refTail, err := ref.tagOrDigest()
-	if err != nil {
-		return err
-	}
-	getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
-	get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil)
-	if err != nil {
-		return err
-	}
-	defer get.Body.Close()
-	manifestBody, err := ioutil.ReadAll(get.Body)
-	if err != nil {
-		return err
-	}
-	switch get.StatusCode {
-	case http.StatusOK:
-	case http.StatusNotFound:
-		return errors.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
-	default:
-		return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
-	}
-
-	digest := get.Header.Get("Docker-Content-Digest")
-	deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest)
-
-	// When retrieving the digest from a registry >= 2.3 use the following header:
-	//   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
-	delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil)
-	if err != nil {
-		return err
-	}
-	defer delete.Body.Close()
-
-	body, err := ioutil.ReadAll(delete.Body)
-	if err != nil {
-		return err
-	}
-	if delete.StatusCode != http.StatusAccepted {
-		return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
-	}
-
-	if c.signatureBase != nil {
-		manifestDigest, err := manifest.Digest(manifestBody)
-		if err != nil {
-			return err
-		}
-
-		for i := 0; ; i++ {
-			url := signatureStorageURL(c.signatureBase, manifestDigest, i)
-			if url == nil {
-				return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
-			}
-			missing, err := c.deleteOneSignature(url)
-			if err != nil {
-				return err
-			}
-			if missing {
-				break
-			}
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/containers/image/v4/docker/docker_transport.go b/vendor/github.com/containers/image/v4/docker/docker_transport.go
deleted file mode 100644
index c9ce75e0d..000000000
--- a/vendor/github.com/containers/image/v4/docker/docker_transport.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package docker
-
-import (
-	"context"
-	"fmt"
-	"strings"
-
-	"github.com/containers/image/v4/docker/policyconfiguration"
-	"github.com/containers/image/v4/docker/reference"
-	"github.com/containers/image/v4/transports"
-	"github.com/containers/image/v4/types"
-	"github.com/pkg/errors"
-)
-
-func init() {
-	transports.Register(Transport)
-}
-
-// Transport is an ImageTransport for Docker registry-hosted images.
-var Transport = dockerTransport{}
-
-type dockerTransport struct{}
-
-func (t dockerTransport) Name() string {
-	return "docker"
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
-func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) {
-	return ParseReference(reference)
-}
-
-// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
-// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
-// scope passed to this function will not be "", that value is always allowed.
-func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
-	// FIXME? We could be verifying the various character set and length restrictions
-	// from docker/distribution/reference.regexp.go, but other than that there
-	// are few semantically invalid strings.
-	return nil
-}
-
-// dockerReference is an ImageReference for Docker images.
-type dockerReference struct {
-	ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
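An illustrative use of this transport's parser (hypothetical caller, not in this file); note the mandatory "//" prefix and the :latest defaulting performed via reference.TagNameOnly below:

package main

import (
	"fmt"

	"github.com/containers/image/v4/docker"
)

func main() {
	// A bare name gains the default tag; the familiar form is kept for display.
	ref, err := docker.ParseReference("//busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport())    // "//busybox:latest"
	fmt.Println(ref.DockerReference().String()) // "docker.io/library/busybox:latest"
}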
-func ParseReference(refString string) (types.ImageReference, error) { - if !strings.HasPrefix(refString, "//") { - return nil, errors.Errorf("docker: image reference %s does not start with //", refString) - } - ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//")) - if err != nil { - return nil, err - } - ref = reference.TagNameOnly(ref) - return NewReference(ref) -} - -// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly(). -func NewReference(ref reference.Named) (types.ImageReference, error) { - return newReference(ref) -} - -// newReference returns a dockerReference for a named reference. -func newReference(ref reference.Named) (dockerReference, error) { - if reference.IsNameOnly(ref) { - return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) - } - // A github.com/distribution/reference value can have a tag and a digest at the same time! - // The docker/distribution API does not really support that (we can’t ask for an image with a specific - // tag and digest), so fail. This MAY be accepted in the future. - // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop - // the tag or the digest first?) - _, isTagged := ref.(reference.NamedTagged) - _, isDigested := ref.(reference.Canonical) - if isTagged && isDigested { - return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported") - } - - return dockerReference{ - ref: ref, - }, nil -} - -func (ref dockerReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref dockerReference) StringWithinTransport() string { - return "//" + reference.FamiliarString(ref.ref) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref dockerReference) DockerReference() reference.Named { - return ref.ref -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. 
-func (ref dockerReference) PolicyConfigurationIdentity() string { - res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) - if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. - panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) - } - return res -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref dockerReference) PolicyConfigurationNamespaces() []string { - return policyconfiguration.DockerReferenceNamespaces(ref.ref) -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - return newImage(ctx, sys, ref) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, sys, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref dockerReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(sys, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref dockerReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return deleteImage(ctx, sys, ref) -} - -// tagOrDigest returns a tag or digest from the reference. -func (ref dockerReference) tagOrDigest() (string, error) { - if ref, ok := ref.ref.(reference.Canonical); ok { - return ref.Digest().String(), nil - } - if ref, ok := ref.ref.(reference.NamedTagged); ok { - return ref.Tag(), nil - } - // This should not happen, NewReference above refuses reference.IsNameOnly values. 
- return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) -} diff --git a/vendor/github.com/containers/image/v4/docker/lookaside.go b/vendor/github.com/containers/image/v4/docker/lookaside.go deleted file mode 100644 index c43160f72..000000000 --- a/vendor/github.com/containers/image/v4/docker/lookaside.go +++ /dev/null @@ -1,202 +0,0 @@ -package docker - -import ( - "fmt" - "io/ioutil" - "net/url" - "os" - "path" - "path/filepath" - "strings" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/types" - "github.com/ghodss/yaml" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. -// You can override this at build time with -// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path' -var systemRegistriesDirPath = builtinRegistriesDirPath - -// builtinRegistriesDirPath is the path to registries.d. -// DO NOT change this, instead see systemRegistriesDirPath above. -const builtinRegistriesDirPath = "/etc/containers/registries.d" - -// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. -// NOTE: Keep this in sync with docs/registries.d.md! -type registryConfiguration struct { - DefaultDocker *registryNamespace `json:"default-docker"` - // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), - Docker map[string]registryNamespace `json:"docker"` -} - -// registryNamespace defines lookaside locations for a single namespace. -type registryNamespace struct { - SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing. - SigStoreStaging string `json:"sigstore-staging"` // For writing only. -} - -// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage. -// Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below. -type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported. - -// configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”. -func configuredSignatureStorageBase(sys *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) { - // FIXME? Loading and parsing the config could be cached across calls. - dirPath := registriesDirPath(sys) - logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath) - config, err := loadAndMergeConfig(dirPath) - if err != nil { - return nil, err - } - - topLevel := config.signatureTopLevel(ref, write) - if topLevel == "" { - return nil, nil - } - - url, err := url.Parse(topLevel) - if err != nil { - return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel) - } - // NOTE: Keep this in sync with docs/signature-protocols.md! - // FIXME? Restrict to explicitly supported schemes? - repo := reference.Path(ref.ref) // Note that this is without a tag or digest. 
- if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references - return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String()) - } - url.Path = url.Path + "/" + repo - return url, nil -} - -// registriesDirPath returns a path to registries.d -func registriesDirPath(sys *types.SystemContext) string { - if sys != nil { - if sys.RegistriesDirPath != "" { - return sys.RegistriesDirPath - } - if sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath) - } - } - return systemRegistriesDirPath -} - -// loadAndMergeConfig loads configuration files in dirPath -func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { - mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} - dockerDefaultMergedFrom := "" - nsMergedFrom := map[string]string{} - - dir, err := os.Open(dirPath) - if err != nil { - if os.IsNotExist(err) { - return &mergedConfig, nil - } - return nil, err - } - configNames, err := dir.Readdirnames(0) - if err != nil { - return nil, err - } - for _, configName := range configNames { - if !strings.HasSuffix(configName, ".yaml") { - continue - } - configPath := filepath.Join(dirPath, configName) - configBytes, err := ioutil.ReadFile(configPath) - if err != nil { - return nil, err - } - - var config registryConfiguration - err = yaml.Unmarshal(configBytes, &config) - if err != nil { - return nil, errors.Wrapf(err, "Error parsing %s", configPath) - } - - if config.DefaultDocker != nil { - if mergedConfig.DefaultDocker != nil { - return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, - dockerDefaultMergedFrom, configPath) - } - mergedConfig.DefaultDocker = config.DefaultDocker - dockerDefaultMergedFrom = configPath - } - - for nsName, nsConfig := range config.Docker { // includes config.Docker == nil - if _, ok := mergedConfig.Docker[nsName]; ok { - return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, - nsName, nsMergedFrom[nsName], configPath) - } - mergedConfig.Docker[nsName] = nsConfig - nsMergedFrom[nsName] = configPath - } - } - - return &mergedConfig, nil -} - -// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”. -// (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used. -func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string { - if config.Docker != nil { - // Look for a full match. - identity := ref.PolicyConfigurationIdentity() - if ns, ok := config.Docker[identity]; ok { - logrus.Debugf(` Using "docker" namespace %s`, identity) - if url := ns.signatureTopLevel(write); url != "" { - return url - } - } - - // Look for a match of the possible parent namespaces. 
-		for _, name := range ref.PolicyConfigurationNamespaces() {
-			if ns, ok := config.Docker[name]; ok {
-				logrus.Debugf(` Using "docker" namespace %s`, name)
-				if url := ns.signatureTopLevel(write); url != "" {
-					return url
-				}
-			}
-		}
-	}
-	// Look for a default location
-	if config.DefaultDocker != nil {
-		logrus.Debugf(` Using "default-docker" configuration`)
-		if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
-			return url
-		}
-	}
-	logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity())
-	return ""
-}
-
-// ns.signatureTopLevel returns a URL string configured in ns for ref, for write access if “write”,
-// or "" if nothing has been configured.
-func (ns registryNamespace) signatureTopLevel(write bool) string {
-	if write && ns.SigStoreStaging != "" {
-		logrus.Debugf(` Using %s`, ns.SigStoreStaging)
-		return ns.SigStoreStaging
-	}
-	if ns.SigStore != "" {
-		logrus.Debugf(` Using %s`, ns.SigStore)
-		return ns.SigStore
-	}
-	return ""
-}
-
-// signatureStorageURL returns a URL usable for accessing the signature index in base with known manifestDigest, or nil if not applicable.
-// Returns nil iff base == nil.
-// NOTE: Keep this in sync with docs/signature-protocols.md!
-func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
-	if base == nil {
-		return nil
-	}
-	url := *base
-	url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
-	return &url
-}
diff --git a/vendor/github.com/containers/image/v4/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v4/docker/policyconfiguration/naming.go
deleted file mode 100644
index e2ed631c5..000000000
--- a/vendor/github.com/containers/image/v4/docker/policyconfiguration/naming.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package policyconfiguration
-
-import (
-	"strings"
-
-	"github.com/containers/image/v4/docker/reference"
-	"github.com/pkg/errors"
-)
-
-// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
-// as a backend for ImageReference.PolicyConfigurationIdentity.
-// The reference must satisfy !reference.IsNameOnly().
-func DockerReferenceIdentity(ref reference.Named) (string, error) {
-	res := ref.Name()
-	tagged, isTagged := ref.(reference.NamedTagged)
-	digested, isDigested := ref.(reference.Canonical)
-	switch {
-	case isTagged && isDigested: // Note that this CAN actually happen.
-		return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
-	case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
-		return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
-	case isTagged:
-		res = res + ":" + tagged.Tag()
-	case isDigested:
-		res = res + "@" + digested.Digest().String()
-	default: // Coverage: The above was supposed to be exhaustive.
-		return "", errors.New("Internal inconsistency, unexpected default branch")
-	}
-	return res, nil
-}
-
-// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search,
-// as a backend for ImageReference.PolicyConfigurationNamespaces.
-// The reference must satisfy !reference.IsNameOnly().
-func DockerReferenceNamespaces(ref reference.Named) []string {
-	// Look for a match of the repository, and then of the possible parent
-	// namespaces.
Note that this only happens on the expanded host names - // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox", - // then in its parent "docker.io/library"; in none of "busybox", - // un-namespaced "library" nor in "" supposedly implicitly representing "library/". - // - // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last - // iteration matches the host name (for any namespace). - res := []string{} - name := ref.Name() - for { - res = append(res, name) - - lastSlash := strings.LastIndex(name, "/") - if lastSlash == -1 { - break - } - name = name[:lastSlash] - } - return res -} diff --git a/vendor/github.com/containers/image/v4/docker/reference/README.md b/vendor/github.com/containers/image/v4/docker/reference/README.md deleted file mode 100644 index 3c4d74eb4..000000000 --- a/vendor/github.com/containers/image/v4/docker/reference/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, -except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file diff --git a/vendor/github.com/containers/image/v4/docker/reference/helpers.go b/vendor/github.com/containers/image/v4/docker/reference/helpers.go deleted file mode 100644 index 978df7eab..000000000 --- a/vendor/github.com/containers/image/v4/docker/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/containers/image/v4/docker/reference/normalize.go b/vendor/github.com/containers/image/v4/docker/reference/normalize.go deleted file mode 100644 index 6a86ec64f..000000000 --- a/vendor/github.com/containers/image/v4/docker/reference/normalize.go +++ /dev/null @@ -1,181 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". 
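A small sketch of the normalization round trip just described (illustrative, outside this package):

package main

import (
	"fmt"

	"github.com/containers/image/v4/docker/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())                  // "docker.io/library/ubuntu"
	fmt.Println(reference.FamiliarString(named)) // "ubuntu"
}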
-type normalizedNamed interface {
-	Named
-	Familiar() Named
-}
-
-// ParseNormalizedNamed parses a string into a named reference
-// transforming a familiar name from Docker UI to a fully
-// qualified reference. If the value may be an identifier
-// use ParseAnyReference.
-func ParseNormalizedNamed(s string) (Named, error) {
-	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
-		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
-	}
-	domain, remainder := splitDockerDomain(s)
-	var remoteName string
-	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
-		remoteName = remainder[:tagSep]
-	} else {
-		remoteName = remainder
-	}
-	if strings.ToLower(remoteName) != remoteName {
-		return nil, errors.New("invalid reference format: repository name must be lowercase")
-	}
-
-	ref, err := Parse(domain + "/" + remainder)
-	if err != nil {
-		return nil, err
-	}
-	named, isNamed := ref.(Named)
-	if !isNamed {
-		return nil, fmt.Errorf("reference %s has no name", ref.String())
-	}
-	return named, nil
-}
-
-// ParseDockerRef normalizes the image reference following the docker convention. This is added
-// mainly for backward compatibility.
-// The reference returned can only be either tagged or digested. If the reference contains both a tag
-// and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
-// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
-// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
-func ParseDockerRef(ref string) (Named, error) {
-	named, err := ParseNormalizedNamed(ref)
-	if err != nil {
-		return nil, err
-	}
-	if _, ok := named.(NamedTagged); ok {
-		if canonical, ok := named.(Canonical); ok {
-			// The reference is both tagged and digested, only
-			// return digested.
-			newNamed, err := WithName(canonical.Name())
-			if err != nil {
-				return nil, err
-			}
-			newCanonical, err := WithDigest(newNamed, canonical.Digest())
-			if err != nil {
-				return nil, err
-			}
-			return newCanonical, nil
-		}
-	}
-	return TagNameOnly(named), nil
-}
-
-// splitDockerDomain splits a repository name into domain and remote-name strings.
-// If no valid domain is found, the default domain is used. The repository name
-// needs to be already validated before.
-func splitDockerDomain(name string) (domain, remainder string) {
-	i := strings.IndexRune(name, '/')
-	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
-		domain, remainder = defaultDomain, name
-	} else {
-		domain, remainder = name[:i], name[i+1:]
-	}
-	if domain == legacyDefaultDomain {
-		domain = defaultDomain
-	}
-	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
-		remainder = officialRepoName + "/" + remainder
-	}
-	return
-}
-
-// familiarizeName returns a shortened version of the name familiar
-// to the Docker UI. Familiar names have the default domain
-// "docker.io" and "library/" repository prefix removed.
-// For example, "docker.io/library/redis" will have the familiar
-// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
-// Returns a familiarized named only reference.
-func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/containers/image/v4/docker/reference/reference.go b/vendor/github.com/containers/image/v4/docker/reference/reference.go deleted file mode 100644 index 8c0c23b2f..000000000 --- a/vendor/github.com/containers/image/v4/docker/reference/reference.go +++ /dev/null @@ -1,433 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. 
-	ErrReferenceInvalidFormat = errors.New("invalid reference format")
-
-	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
-	ErrTagInvalidFormat = errors.New("invalid tag format")
-
-	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
-	ErrDigestInvalidFormat = errors.New("invalid digest format")
-
-	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
-	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
-
-	// ErrNameEmpty is returned for empty, invalid repository names.
-	ErrNameEmpty = errors.New("repository name must have at least one component")
-
-	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
-	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
-
-	// ErrNameNotCanonical is returned when a name is not canonical.
-	ErrNameNotCanonical = errors.New("repository name must be canonical")
-)
-
-// Reference is an opaque object reference identifier that may include
-// modifiers such as a hostname, name, tag, and digest.
-type Reference interface {
-	// String returns the full reference
-	String() string
-}
-
-// Field provides a wrapper type for resolving correct reference types when
-// working with encoding.
-type Field struct {
-	reference Reference
-}
-
-// AsField wraps a reference in a Field for encoding.
-func AsField(reference Reference) Field {
-	return Field{reference}
-}
-
-// Reference unwraps the reference type from the field to
-// return the Reference object. This object should be
-// of the appropriate type to further check for different
-// reference types.
-func (f Field) Reference() Reference {
-	return f.reference
-}
-
-// MarshalText serializes the field to byte text which
-// is the string of the reference.
-func (f Field) MarshalText() (p []byte, err error) {
-	return []byte(f.reference.String()), nil
-}
-
-// UnmarshalText parses text bytes by invoking the
-// reference parser to ensure the appropriately
-// typed reference object is wrapped by field.
-func (f *Field) UnmarshalText(p []byte) error {
-	r, err := Parse(string(p))
-	if err != nil {
-		return err
-	}
-
-	f.reference = r
-	return nil
-}
-
-// Named is an object with a full name
-type Named interface {
-	Reference
-	Name() string
-}
-
-// Tagged is an object which has a tag
-type Tagged interface {
-	Reference
-	Tag() string
-}
-
-// NamedTagged is an object including a name and tag.
-type NamedTagged interface {
-	Named
-	Tag() string
-}
-
-// Digested is an object which has a digest
-// by which it can be referenced
-type Digested interface {
-	Reference
-	Digest() digest.Digest
-}
-
-// Canonical reference is an object with a fully unique
-// name including a name with domain and digest
-type Canonical interface {
-	Named
-	Digest() digest.Digest
-}
-
-// namedRepository is a reference to a repository with a name.
-// A namedRepository has both domain and path components.
-type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// DEPRECATED: Use Domain or Path -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. -func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. 
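For illustration (hypothetical caller), combining WithName, Domain/Path, and WithTag from this file:

package main

import (
	"fmt"

	"github.com/containers/image/v4/docker/reference"
)

func main() {
	named, err := reference.WithName("docker.io/library/busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.Domain(named), reference.Path(named)) // "docker.io library/busybox"

	tagged, err := reference.WithTag(named, "1.31")
	if err != nil {
		panic(err)
	}
	fmt.Println(tagged.String()) // "docker.io/library/busybox:1.31"
}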
-func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. -func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/github.com/containers/image/v4/docker/reference/regexp.go b/vendor/github.com/containers/image/v4/docker/reference/regexp.go deleted file mode 100644 index 786034932..000000000 --- a/vendor/github.com/containers/image/v4/docker/reference/regexp.go +++ /dev/null @@ -1,143 +0,0 @@ 
-package reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // DomainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. 
- anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/github.com/containers/image/v4/docker/tarfile/dest.go b/vendor/github.com/containers/image/v4/docker/tarfile/dest.go deleted file mode 100644 index aec8404b6..000000000 --- a/vendor/github.com/containers/image/v4/docker/tarfile/dest.go +++ /dev/null @@ -1,407 +0,0 @@ -package tarfile - -import ( - "archive/tar" - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/internal/tmpdir" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. -type Destination struct { - writer io.Writer - tar *tar.Writer - repoTags []reference.NamedTagged - // Other state. - blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs - config []byte -} - -// NewDestination returns a tarfile.Destination for the specified io.Writer. -func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination { - repoTags := []reference.NamedTagged{} - if ref != nil { - repoTags = append(repoTags, ref) - } - return &Destination{ - writer: dest, - tar: tar.NewWriter(dest), - repoTags: repoTags, - blobs: make(map[digest.Digest]types.BlobInfo), - } -} - -// AddRepoTags adds the specified tags to the destination's repoTags. -func (d *Destination) AddRepoTags(tags []reference.NamedTagged) { - d.repoTags = append(d.repoTags, tags...) 
-}
-
-// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
-// If an empty slice or nil is returned, then any MIME type can be tried for upload.
-func (d *Destination) SupportedManifestMIMETypes() []string {
-	return []string{
-		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
-	}
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *Destination) SupportsSignatures(ctx context.Context) error {
-	return errors.Errorf("Storing signatures for docker tar files is not supported")
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *Destination) AcceptsForeignLayerURLs() bool {
-	return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
-func (d *Destination) MustMatchRuntimeOS() bool {
-	return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *Destination) IgnoresEmbeddedDockerReference() bool {
-	return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *Destination) HasThreadSafePutBlob() bool {
-	return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
-	// Ouch, we need to stream the blob into a temporary file just to determine the size.
-	// When the layer is decompressed, we also have to generate the digest on the uncompressed data.
-	if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
-		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
-		streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob")
-		if err != nil {
-			return types.BlobInfo{}, err
-		}
-		defer os.Remove(streamCopy.Name())
-		defer streamCopy.Close()
-
-		digester := digest.Canonical.Digester()
-		tee := io.TeeReader(stream, digester.Hash())
-		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
-		size, err := io.Copy(streamCopy, tee)
-		if err != nil {
-			return types.BlobInfo{}, err
-		}
-		_, err = streamCopy.Seek(0, os.SEEK_SET)
-		if err != nil {
-			return types.BlobInfo{}, err
-		}
-		inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
-		if inputInfo.Digest == "" {
-			inputInfo.Digest = digester.Digest()
-		}
-		stream = streamCopy
-		logrus.Debugf("... streaming done")
-	}
-
-	// Maybe the blob has already been sent
-	ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
-	if err != nil {
-		return types.BlobInfo{}, err
-	}
-	if ok {
-		return reusedInfo, nil
-	}
-
-	if isConfig {
-		buf, err := ioutil.ReadAll(stream)
-		if err != nil {
-			return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
-		}
-		d.config = buf
-		if err := d.sendFile(inputInfo.Digest.Hex()+".json", inputInfo.Size, bytes.NewReader(buf)); err != nil {
-			return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
-		}
-	} else {
-		// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
-		// writeLegacyLayerMetadata constructs layer IDs differently from inputInfo.Digest values (as described
-		// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
-		// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
-		// in the root of the tarball.
-		if err := d.sendFile(inputInfo.Digest.Hex()+".tar", inputInfo.Size, stream); err != nil {
-			return types.BlobInfo{}, err
-		}
-	}
-	d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}
-	return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
-// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
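
Before the tar header for a blob can be written, PutBlob above needs the blob's final size and, when the caller did not supply one, its digest; the unknown-size path therefore tees the stream through a digester while spooling it to a temporary file. A minimal sketch of that pattern, using only the standard library and go-digest, with os.CreateTemp standing in for the tmpdir helper the real code uses:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	stream := strings.NewReader("pretend this is a layer blob") // hypothetical input

	tmp, err := os.CreateTemp("", "blob-spool-") // stands in for tmpdir.TemporaryDirectoryForBigFiles()
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()

	// Tee the stream through a digester while spooling it to disk, so both
	// the digest and the size are known before anything hits the tar writer.
	digester := digest.Canonical.Digester()
	size, err := io.Copy(tmp, io.TeeReader(stream, digester.Hash()))
	if err != nil {
		panic(err)
	}
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}
	fmt.Printf("size=%d digest=%s\n", size, digester.Digest())
	// tmp can now be re-read as the blob stream with known metadata.
}
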
-func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - if info.Digest == "" { - return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest") - } - if blob, ok := d.blobs[info.Digest]; ok { - return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil - } - return false, types.BlobInfo{}, nil -} - -func (d *Destination) createRepositoriesFile(rootLayerID string) error { - repositories := map[string]map[string]string{} - for _, repoTag := range d.repoTags { - if val, ok := repositories[repoTag.Name()]; ok { - val[repoTag.Tag()] = rootLayerID - } else { - repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): rootLayerID} - } - } - - b, err := json.Marshal(repositories) - if err != nil { - return errors.Wrap(err, "Error marshaling repositories") - } - if err := d.sendBytes(legacyRepositoriesFileName, b); err != nil { - return errors.Wrap(err, "Error writing config json file") - } - return nil -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *Destination) PutManifest(ctx context.Context, m []byte) error { - // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, - // so the caller trying a different manifest kind would be pointless. - var man manifest.Schema2 - if err := json.Unmarshal(m, &man); err != nil { - return errors.Wrap(err, "Error parsing manifest") - } - if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType { - return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest") - } - - layerPaths, lastLayerID, err := d.writeLegacyLayerMetadata(man.LayersDescriptors) - if err != nil { - return err - } - - if len(man.LayersDescriptors) > 0 { - if err := d.createRepositoriesFile(lastLayerID); err != nil { - return err - } - } - - repoTags := []string{} - for _, tag := range d.repoTags { - // For github.com/docker/docker consumers, this works just as well as - // refString := ref.String() - // because when reading the RepoTags strings, github.com/docker/docker/reference - // normalizes both of them to the same value. - // - // Doing it this way to include the normalized-out `docker.io[/library]` does make - // a difference for github.com/projectatomic/docker consumers, with the - // “Add --add-registry and --block-registry options to docker daemon” patch. - // These consumers treat reference strings which include a hostname and reference - // strings without a hostname differently. - // - // Using the host name here is more explicit about the intent, and it has the same - // effect as (docker pull) in projectatomic/docker, which tags the result using - // a hostname-qualified reference. - // See https://github.com/containers/image/issues/72 for a more detailed - // analysis and explanation. 
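
For context on the legacy layout PutManifest maintains: createRepositoriesFile above serializes a two-level map, repository name to tag to layer ID, into the legacy "repositories" file. A small sketch of that structure (repository names and layer IDs below are hypothetical); the hostname-qualified name:tag strings assembled just below follow the reasoning laid out in the preceding comment:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Same shape as the map built by createRepositoriesFile:
	// repository name -> tag -> layer ID.
	repositories := map[string]map[string]string{}
	add := func(name, tag, layerID string) {
		if repositories[name] == nil {
			repositories[name] = map[string]string{}
		}
		repositories[name][tag] = layerID
	}
	add("docker.io/library/busybox", "latest", "1a2b3c") // hypothetical IDs
	add("docker.io/library/busybox", "musl", "4d5e6f")

	b, err := json.Marshal(repositories)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
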
- refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag()) - repoTags = append(repoTags, refString) - } - - items := []ManifestItem{{ - Config: man.ConfigDescriptor.Digest.Hex() + ".json", - RepoTags: repoTags, - Layers: layerPaths, - Parent: "", - LayerSources: nil, - }} - itemsBytes, err := json.Marshal(&items) - if err != nil { - return err - } - - // FIXME? Do we also need to support the legacy format? - return d.sendBytes(manifestFileName, itemsBytes) -} - -// writeLegacyLayerMetadata writes legacy VERSION and configuration files for all layers -func (d *Destination) writeLegacyLayerMetadata(layerDescriptors []manifest.Schema2Descriptor) (layerPaths []string, lastLayerID string, err error) { - var chainID digest.Digest - lastLayerID = "" - for i, l := range layerDescriptors { - // This chainID value matches the computation in docker/docker/layer.CreateChainID … - if chainID == "" { - chainID = l.Digest - } else { - chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String()) - } - // … but note that this image ID does not match docker/docker/image/v1.CreateID. At least recent - // versions allocate new IDs on load, as long as the IDs we use are unique / cannot loop. - // - // Overall, the goal of computing a digest dependent on the full history is to avoid reusing an image ID - // (and possibly creating a loop in the "parent" links) if a layer with the same DiffID appears two or more - // times in layersDescriptors. The ChainID values are sufficient for this, the v1.CreateID computation - // which also mixes in the full image configuration seems unnecessary, at least as long as we are storing - // only a single image per tarball, i.e. all DiffID prefixes are unique (can’t differ only with - // configuration). - layerID := chainID.Hex() - - physicalLayerPath := l.Digest.Hex() + ".tar" - // The layer itself has been stored into physicalLayerPath in PutManifest. - // So, use that path for layerPaths used in the non-legacy manifest - layerPaths = append(layerPaths, physicalLayerPath) - // ... 
and create a symlink for the legacy format;
-		if err := d.sendSymlink(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
-			return nil, "", errors.Wrap(err, "Error creating layer symbolic link")
-		}
-
-		b := []byte("1.0")
-		if err := d.sendBytes(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
-			return nil, "", errors.Wrap(err, "Error writing VERSION file")
-		}
-
-		// The legacy format requires a config file per layer
-		layerConfig := make(map[string]interface{})
-		layerConfig["id"] = layerID
-
-		// The root layer doesn't have any parent
-		if lastLayerID != "" {
-			layerConfig["parent"] = lastLayerID
-		}
-		// The top layer's configuration file is generated by using a subpart of the image configuration
-		if i == len(layerDescriptors)-1 {
-			var config map[string]*json.RawMessage
-			err := json.Unmarshal(d.config, &config)
-			if err != nil {
-				return nil, "", errors.Wrap(err, "Error unmarshaling config")
-			}
-			for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
-				layerConfig[attr] = config[attr]
-			}
-		}
-		b, err := json.Marshal(layerConfig)
-		if err != nil {
-			return nil, "", errors.Wrap(err, "Error marshaling layer config")
-		}
-		if err := d.sendBytes(filepath.Join(layerID, legacyConfigFileName), b); err != nil {
-			return nil, "", errors.Wrap(err, "Error writing config json file")
-		}
-
-		lastLayerID = layerID
-	}
-	return layerPaths, lastLayerID, nil
-}
-
-type tarFI struct {
-	path      string
-	size      int64
-	isSymlink bool
-}
-
-func (t *tarFI) Name() string {
-	return t.path
-}
-func (t *tarFI) Size() int64 {
-	return t.size
-}
-func (t *tarFI) Mode() os.FileMode {
-	if t.isSymlink {
-		return os.ModeSymlink
-	}
-	return 0444
-}
-func (t *tarFI) ModTime() time.Time {
-	return time.Unix(0, 0)
-}
-func (t *tarFI) IsDir() bool {
-	return false
-}
-func (t *tarFI) Sys() interface{} {
-	return nil
-}
-
-// sendSymlink sends a symlink into the tar stream.
-func (d *Destination) sendSymlink(path string, target string) error {
-	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
-	if err != nil {
-		return err
-	}
-	logrus.Debugf("Sending as tar link %s -> %s", path, target)
-	return d.tar.WriteHeader(hdr)
-}
-
-// sendBytes sends the bytes, as a file at path, into the tar stream.
-func (d *Destination) sendBytes(path string, b []byte) error {
-	return d.sendFile(path, int64(len(b)), bytes.NewReader(b))
-}
-
-// sendFile sends a file into the tar stream.
-func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
-	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
-	if err != nil {
-		return err
-	}
-	logrus.Debugf("Sending as tar file %s", path)
-	if err := d.tar.WriteHeader(hdr); err != nil {
-		return err
-	}
-	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
-	size, err := io.Copy(d.tar, stream)
-	if err != nil {
-		return err
-	}
-	if size != expectedSize {
-		return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
-	}
-	return nil
-}
-
-// PutSignatures adds the given signatures to the docker tarfile (currently not
-// supported).
MUST be called after PutManifest (signatures reference manifest -// contents) -func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte) error { - if len(signatures) != 0 { - return errors.Errorf("Storing signatures for docker tar files is not supported") - } - return nil -} - -// Commit finishes writing data to the underlying io.Writer. -// It is the caller's responsibility to close it, if necessary. -func (d *Destination) Commit(ctx context.Context) error { - return d.tar.Close() -} diff --git a/vendor/github.com/containers/image/v4/docker/tarfile/doc.go b/vendor/github.com/containers/image/v4/docker/tarfile/doc.go deleted file mode 100644 index 4ea5369c0..000000000 --- a/vendor/github.com/containers/image/v4/docker/tarfile/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package tarfile is an internal implementation detail of some transports. -// Do not use outside of the github.com/containers/image repo! -package tarfile diff --git a/vendor/github.com/containers/image/v4/docker/tarfile/src.go b/vendor/github.com/containers/image/v4/docker/tarfile/src.go deleted file mode 100644 index 78e4d6f65..000000000 --- a/vendor/github.com/containers/image/v4/docker/tarfile/src.go +++ /dev/null @@ -1,478 +0,0 @@ -package tarfile - -import ( - "archive/tar" - "bytes" - "context" - "encoding/json" - "io" - "io/ioutil" - "os" - "path" - "sync" - - "github.com/containers/image/v4/internal/tmpdir" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/pkg/compression" - "github.com/containers/image/v4/types" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// Source is a partial implementation of types.ImageSource for reading from tarPath. -type Source struct { - tarPath string - removeTarPathOnClose bool // Remove temp file on close if true - // The following data is only available after ensureCachedDataIsPresent() succeeds - tarManifest *ManifestItem // nil if not available yet. - configBytes []byte - configDigest digest.Digest - orderedDiffIDList []digest.Digest - knownLayers map[digest.Digest]*layerInfo - // Other state - generatedManifest []byte // Private cache for GetManifest(), nil if not set yet. - cacheDataLock sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe - cacheDataResult error // Private state for ensureCachedDataIsPresent -} - -type layerInfo struct { - path string - size int64 -} - -// TODO: We could add support for multiple images in a single archive, so -// that people could use docker-archive:opensuse.tar:opensuse:leap as -// the source of an image. -// To do for both the NewSourceFromFile and NewSourceFromStream functions - -// NewSourceFromFile returns a tarfile.Source for the specified path. -func NewSourceFromFile(path string) (*Source, error) { - file, err := os.Open(path) - if err != nil { - return nil, errors.Wrapf(err, "error opening file %q", path) - } - defer file.Close() - - // If the file is already not compressed we can just return the file itself - // as a source. Otherwise we pass the stream to NewSourceFromStream. - stream, isCompressed, err := compression.AutoDecompress(file) - if err != nil { - return nil, errors.Wrapf(err, "Error detecting compression for file %q", path) - } - defer stream.Close() - if !isCompressed { - return &Source{ - tarPath: path, - }, nil - } - return NewSourceFromStream(stream) -} - -// NewSourceFromStream returns a tarfile.Source for the specified inputStream, -// which can be either compressed or uncompressed. 
The caller can close the -// inputStream immediately after NewSourceFromFile returns. -func NewSourceFromStream(inputStream io.Reader) (*Source, error) { - // FIXME: use SystemContext here. - // Save inputStream to a temporary file - tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tar") - if err != nil { - return nil, errors.Wrap(err, "error creating temporary file") - } - defer tarCopyFile.Close() - - succeeded := false - defer func() { - if !succeeded { - os.Remove(tarCopyFile.Name()) - } - }() - - // In order to be compatible with docker-load, we need to support - // auto-decompression (it's also a nice quality-of-life thing to avoid - // giving users really confusing "invalid tar header" errors). - uncompressedStream, _, err := compression.AutoDecompress(inputStream) - if err != nil { - return nil, errors.Wrap(err, "Error auto-decompressing input") - } - defer uncompressedStream.Close() - - // Copy the plain archive to the temporary file. - // - // TODO: This can take quite some time, and should ideally be cancellable - // using a context.Context. - if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil { - return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name()) - } - succeeded = true - - return &Source{ - tarPath: tarCopyFile.Name(), - removeTarPathOnClose: true, - }, nil -} - -// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component. -type tarReadCloser struct { - *tar.Reader - backingFile *os.File -} - -func (t *tarReadCloser) Close() error { - return t.backingFile.Close() -} - -// openTarComponent returns a ReadCloser for the specific file within the archive. -// This is linear scan; we assume that the tar file will have a fairly small amount of files (~layers), -// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough. -// The caller should call .Close() on the returned stream. -func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) { - f, err := os.Open(s.tarPath) - if err != nil { - return nil, err - } - succeeded := false - defer func() { - if !succeeded { - f.Close() - } - }() - - tarReader, header, err := findTarComponent(f, componentPath) - if err != nil { - return nil, err - } - if header == nil { - return nil, os.ErrNotExist - } - if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested - // We follow only one symlink; so no loops are possible. - if _, err := f.Seek(0, os.SEEK_SET); err != nil { - return nil, err - } - // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive, - // so we don't care. - tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname)) - if err != nil { - return nil, err - } - if header == nil { - return nil, os.ErrNotExist - } - } - - if !header.FileInfo().Mode().IsRegular() { - return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name) - } - succeeded = true - return &tarReadCloser{Reader: tarReader, backingFile: f}, nil -} - -// findTarComponent returns a header and a reader matching path within inputFile, -// or (nil, nil, nil) if not found. 
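
As the comment above says, findTarComponent is a plain linear scan over tar headers. A self-contained sketch of the same idea against an in-memory archive (the file name and contents here are placeholders):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

func main() {
	// Build a tiny in-memory archive standing in for tarPath.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte(`[{"Config":"abc.json"}]`)
	if err := tw.WriteHeader(&tar.Header{Name: "manifest.json", Mode: 0o644, Size: int64(len(body))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(body); err != nil {
		panic(err)
	}
	tw.Close()

	// Walk headers until the wanted path shows up, leaving the tar.Reader
	// positioned at that entry's contents.
	t := tar.NewReader(&buf)
	for {
		h, err := t.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		if h.Name == "manifest.json" {
			content, _ := io.ReadAll(t)
			fmt.Printf("found %s: %s\n", h.Name, content)
			return
		}
	}
	fmt.Println("not found")
}
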
-func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) { - t := tar.NewReader(inputFile) - for { - h, err := t.Next() - if err == io.EOF { - break - } - if err != nil { - return nil, nil, err - } - if h.Name == path { - return t, h, nil - } - } - return nil, nil, nil -} - -// readTarComponent returns full contents of componentPath. -func (s *Source) readTarComponent(path string) ([]byte, error) { - file, err := s.openTarComponent(path) - if err != nil { - return nil, errors.Wrapf(err, "Error loading tar component %s", path) - } - defer file.Close() - bytes, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } - return bytes, nil -} - -// ensureCachedDataIsPresent loads data necessary for any of the public accessors. -// It is safe to call this from multi-threaded code. -func (s *Source) ensureCachedDataIsPresent() error { - s.cacheDataLock.Do(func() { - s.cacheDataResult = s.ensureCachedDataIsPresentPrivate() - }) - return s.cacheDataResult -} - -// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent. -// Call ensureCachedDataIsPresent instead. -func (s *Source) ensureCachedDataIsPresentPrivate() error { - // Read and parse manifest.json - tarManifest, err := s.loadTarManifest() - if err != nil { - return err - } - - // Check to make sure length is 1 - if len(tarManifest) != 1 { - return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest)) - } - - // Read and parse config. - configBytes, err := s.readTarComponent(tarManifest[0].Config) - if err != nil { - return err - } - var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. - if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { - return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config) - } - - knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig) - if err != nil { - return err - } - - // Success; commit. - s.tarManifest = &tarManifest[0] - s.configBytes = configBytes - s.configDigest = digest.FromBytes(configBytes) - s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs - s.knownLayers = knownLayers - return nil -} - -// loadTarManifest loads and decodes the manifest.json. -func (s *Source) loadTarManifest() ([]ManifestItem, error) { - // FIXME? Do we need to deal with the legacy format? - bytes, err := s.readTarComponent(manifestFileName) - if err != nil { - return nil, err - } - var items []ManifestItem - if err := json.Unmarshal(bytes, &items); err != nil { - return nil, errors.Wrap(err, "Error decoding tar manifest.json") - } - return items, nil -} - -// Close removes resources associated with an initialized Source, if any. -func (s *Source) Close() error { - if s.removeTarPathOnClose { - return os.Remove(s.tarPath) - } - return nil -} - -// LoadTarManifest loads and decodes the manifest.json -func (s *Source) LoadTarManifest() ([]ManifestItem, error) { - return s.loadTarManifest() -} - -func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { - // Collect layer data available in manifest and config. 
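
ensureCachedDataIsPresent above is the concurrency gate for all of this lazy loading: sync.Once guarantees the expensive parse runs exactly once even under concurrent callers, and the stashed error is replayed to everyone. A minimal sketch of that pattern, with hypothetical field contents:

package main

import (
	"fmt"
	"sync"
)

type source struct {
	cacheDataLock   sync.Once // mirrors the fields in the Source struct above
	cacheDataResult error
	manifest        string
}

func (s *source) ensureCachedDataIsPresent() error {
	s.cacheDataLock.Do(func() {
		// Pretend this parses manifest.json and the image config.
		s.manifest = "parsed manifest.json"
		s.cacheDataResult = nil
	})
	return s.cacheDataResult
}

func main() {
	var s source
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := s.ensureCachedDataIsPresent(); err == nil {
				fmt.Println("loaded:", s.manifest) // the load body ran only once
			}
		}()
	}
	wg.Wait()
}
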
- if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { - return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) - } - knownLayers := map[digest.Digest]*layerInfo{} - unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. - for i, diffID := range parsedConfig.RootFS.DiffIDs { - if _, ok := knownLayers[diffID]; ok { - // Apparently it really can happen that a single image contains the same layer diff more than once. - // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter - // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original. - continue - } - layerPath := tarManifest.Layers[i] - if _, ok := unknownLayerSizes[layerPath]; ok { - return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) - } - li := &layerInfo{ // A new element in each iteration - path: layerPath, - size: -1, - } - knownLayers[diffID] = li - unknownLayerSizes[layerPath] = li - } - - // Scan the tar file to collect layer sizes. - file, err := os.Open(s.tarPath) - if err != nil { - return nil, err - } - defer file.Close() - t := tar.NewReader(file) - for { - h, err := t.Next() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - if li, ok := unknownLayerSizes[h.Name]; ok { - // Since GetBlob will decompress layers that are compressed we need - // to do the decompression here as well, otherwise we will - // incorrectly report the size. Pretty critical, since tools like - // umoci always compress layer blobs. Obviously we only bother with - // the slower method of checking if it's compressed. - uncompressedStream, isCompressed, err := compression.AutoDecompress(t) - if err != nil { - return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name) - } - defer uncompressedStream.Close() - - uncompressedSize := h.Size - if isCompressed { - uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream) - if err != nil { - return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name) - } - } - li.size = uncompressedSize - delete(unknownLayerSizes, h.Name) - } - } - if len(unknownLayerSizes) != 0 { - return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. - } - - return knownLayers, nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. 
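
prepareLayerData above learns a compressed layer's uncompressed size by decompressing to a sink and counting the bytes copied. A stand-alone sketch of that probe, with a plain gzip reader standing in for compression.AutoDecompress:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

func main() {
	// Fabricate a compressed "layer" (1100 bytes uncompressed).
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write(bytes.Repeat([]byte("layer data "), 100))
	zw.Close()

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer zr.Close()

	// Count the decompressed bytes without keeping them.
	uncompressedSize, err := io.Copy(io.Discard, zr)
	if err != nil {
		panic(err)
	}
	fmt.Println("uncompressed size:", uncompressedSize) // 1100
}
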
- return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) - } - if s.generatedManifest == nil { - if err := s.ensureCachedDataIsPresent(); err != nil { - return nil, "", err - } - m := manifest.Schema2{ - SchemaVersion: 2, - MediaType: manifest.DockerV2Schema2MediaType, - ConfigDescriptor: manifest.Schema2Descriptor{ - MediaType: manifest.DockerV2Schema2ConfigMediaType, - Size: int64(len(s.configBytes)), - Digest: s.configDigest, - }, - LayersDescriptors: []manifest.Schema2Descriptor{}, - } - for _, diffID := range s.orderedDiffIDList { - li, ok := s.knownLayers[diffID] - if !ok { - return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) - } - m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ - Digest: diffID, // diffID is a digest of the uncompressed tarball - MediaType: manifest.DockerV2Schema2LayerMediaType, - Size: li.size, - }) - } - manifestBytes, err := json.Marshal(&m) - if err != nil { - return nil, "", err - } - s.generatedManifest = manifestBytes - } - return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil -} - -// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input. -type uncompressedReadCloser struct { - io.Reader - underlyingCloser func() error - uncompressedCloser func() error -} - -func (r uncompressedReadCloser) Close() error { - var res error - if err := r.uncompressedCloser(); err != nil { - res = err - } - if err := r.underlyingCloser(); err != nil && res == nil { - res = err - } - return res -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *Source) HasThreadSafeGetBlob() bool { - return true -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - if err := s.ensureCachedDataIsPresent(); err != nil { - return nil, 0, err - } - - if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. - return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil - } - - if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, - underlyingStream, err := s.openTarComponent(li.path) - if err != nil { - return nil, 0, err - } - closeUnderlyingStream := true - defer func() { - if closeUnderlyingStream { - underlyingStream.Close() - } - }() - - // In order to handle the fact that digests != diffIDs (and thus that a - // caller which is trying to verify the blob will run into problems), - // we need to decompress blobs. This is a bit ugly, but it's a - // consequence of making everything addressable by their DiffID rather - // than by their digest... - // - // In particular, because the v2s2 manifest being generated uses - // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of - // layers not their _actual_ digest. The result is that copy/... will - // be verifing a "digest" which is not the actual layer's digest (but - // is instead the DiffID). 
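
GetBlob below hands the decompressed stream back to the caller wrapped in uncompressedReadCloser (defined above), so a single Close tears down both the decompressor and the underlying tar component. A sketch of that composition pattern:

package main

import (
	"fmt"
	"io"
	"strings"
)

// composedReadCloser is a hypothetical analogue of uncompressedReadCloser:
// one io.ReadCloser whose Close runs several closers, reporting the first error.
type composedReadCloser struct {
	io.Reader
	closers []func() error
}

func (c composedReadCloser) Close() error {
	var first error
	for _, fn := range c.closers {
		if err := fn(); err != nil && first == nil {
			first = err
		}
	}
	return first
}

func main() {
	underlying := io.NopCloser(strings.NewReader("layer bytes"))
	decompressed := io.NopCloser(underlying) // stands in for compression.AutoDecompress output

	rc := composedReadCloser{Reader: decompressed, closers: []func() error{decompressed.Close, underlying.Close}}
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b), rc.Close()) // layer bytes <nil>
}
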
- - uncompressedStream, _, err := compression.AutoDecompress(underlyingStream) - if err != nil { - return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest) - } - - newStream := uncompressedReadCloser{ - Reader: uncompressedStream, - underlyingCloser: underlyingStream.Close, - uncompressedCloser: uncompressedStream.Close, - } - closeUnderlyingStream = false - - return newStream, li.size, nil - } - - return nil, 0, errors.Errorf("Unknown blob %s", info.Digest) -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. - return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) - } - return [][]byte{}, nil -} diff --git a/vendor/github.com/containers/image/v4/docker/tarfile/types.go b/vendor/github.com/containers/image/v4/docker/tarfile/types.go deleted file mode 100644 index c630f5227..000000000 --- a/vendor/github.com/containers/image/v4/docker/tarfile/types.go +++ /dev/null @@ -1,28 +0,0 @@ -package tarfile - -import ( - "github.com/containers/image/v4/manifest" - "github.com/opencontainers/go-digest" -) - -// Various data structures. - -// Based on github.com/docker/docker/image/tarexport/tarexport.go -const ( - manifestFileName = "manifest.json" - legacyLayerFileName = "layer.tar" - legacyConfigFileName = "json" - legacyVersionFileName = "VERSION" - legacyRepositoriesFileName = "repositories" -) - -// ManifestItem is an element of the array stored in the top-level manifest.json file. -type ManifestItem struct { - Config string - RepoTags []string - Layers []string - Parent imageID `json:",omitempty"` - LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"` -} - -type imageID string diff --git a/vendor/github.com/containers/image/v4/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v4/docker/wwwauthenticate.go deleted file mode 100644 index 23664a74a..000000000 --- a/vendor/github.com/containers/image/v4/docker/wwwauthenticate.go +++ /dev/null @@ -1,159 +0,0 @@ -package docker - -// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies. - -import ( - "net/http" - "strings" -) - -// challenge carries information from a WWW-Authenticate response header. -// See RFC 7235. -type challenge struct { - // Scheme is the auth-scheme according to RFC 7235 - Scheme string - - // Parameters are the auth-params according to RFC 7235 - Parameters map[string]string -} - -// Octet types from RFC 7230. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -func parseAuthHeader(header http.Header) []challenge { - challenges := []challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, challenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -// NOTE: This is not a fully compliant parser per RFC 7235: -// Most notably it does not support more than one challenge within a single header -// Some of the whitespace parsing also seems noncompliant. -// But it is clearly better than what we used to have… -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/containers/image/v4/image/docker_list.go b/vendor/github.com/containers/image/v4/image/docker_list.go deleted file mode 100644 index a11cd06b9..000000000 --- a/vendor/github.com/containers/image/v4/image/docker_list.go +++ /dev/null @@ -1,94 +0,0 @@ -package image - -import ( - "context" - "encoding/json" - "fmt" - "runtime" - - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type platformSpec struct { - Architecture string `json:"architecture"` - OS string `json:"os"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - Variant string `json:"variant,omitempty"` - Features []string `json:"features,omitempty"` // removed in OCI -} - -// A manifestDescriptor references a platform-specific manifest. 
-type manifestDescriptor struct { - manifest.Schema2Descriptor - Platform platformSpec `json:"platform"` -} - -type manifestList struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - Manifests []manifestDescriptor `json:"manifests"` -} - -// chooseDigestFromManifestList parses blob as a schema2 manifest list, -// and returns the digest of the image appropriate for the current environment. -func chooseDigestFromManifestList(sys *types.SystemContext, blob []byte) (digest.Digest, error) { - wantedArch := runtime.GOARCH - if sys != nil && sys.ArchitectureChoice != "" { - wantedArch = sys.ArchitectureChoice - } - wantedOS := runtime.GOOS - if sys != nil && sys.OSChoice != "" { - wantedOS = sys.OSChoice - } - - list := manifestList{} - if err := json.Unmarshal(blob, &list); err != nil { - return "", err - } - for _, d := range list.Manifests { - if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { - return d.Digest, nil - } - } - return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS) -} - -func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { - targetManifestDigest, err := chooseDigestFromManifestList(sys, manblob) - if err != nil { - return nil, err - } - manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) - if err != nil { - return nil, err - } - - matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) - if err != nil { - return nil, errors.Wrap(err, "Error computing manifest digest") - } - if !matches { - return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest) - } - - return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) -} - -// ChooseManifestInstanceFromManifestList returns a digest of a manifest appropriate -// for the current system from the manifest available from src. -func ChooseManifestInstanceFromManifestList(ctx context.Context, sys *types.SystemContext, src types.UnparsedImage) (digest.Digest, error) { - // For now this only handles manifest.DockerV2ListMediaType; we can generalize it later, - // probably along with manifest list editing. - blob, mt, err := src.Manifest(ctx) - if err != nil { - return "", err - } - if mt != manifest.DockerV2ListMediaType { - return "", fmt.Errorf("Internal error: Trying to select an image from a non-manifest-list manifest type %s", mt) - } - return chooseDigestFromManifestList(sys, blob) -} diff --git a/vendor/github.com/containers/image/v4/image/docker_schema1.go b/vendor/github.com/containers/image/v4/image/docker_schema1.go deleted file mode 100644 index 97ebeac06..000000000 --- a/vendor/github.com/containers/image/v4/image/docker_schema1.go +++ /dev/null @@ -1,202 +0,0 @@ -package image - -import ( - "context" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type manifestSchema1 struct { - m *manifest.Schema1 -} - -func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { - m, err := manifest.Schema1FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestSchema1{m: m}, nil -} - -// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. 
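
Stepping back to chooseDigestFromManifestList above: selection is a straight scan of the list's platform entries against the wanted OS and architecture. A stand-alone sketch with a hypothetical two-entry list:

package main

import (
	"encoding/json"
	"fmt"
	"runtime"
)

func main() {
	// A made-up manifest list blob with two platform entries.
	blob := []byte(`{"manifests":[
	  {"digest":"sha256:aaa","platform":{"architecture":"arm64","os":"linux"}},
	  {"digest":"sha256:bbb","platform":{"architecture":"amd64","os":"linux"}}]}`)

	var list struct {
		Manifests []struct {
			Digest   string `json:"digest"`
			Platform struct {
				Architecture string `json:"architecture"`
				OS           string `json:"os"`
			} `json:"platform"`
		} `json:"manifests"`
	}
	if err := json.Unmarshal(blob, &list); err != nil {
		panic(err)
	}
	// Pick the first manifest matching the current platform, as the real
	// code does when sys.ArchitectureChoice/OSChoice are unset.
	for _, m := range list.Manifests {
		if m.Platform.Architecture == runtime.GOARCH && m.Platform.OS == runtime.GOOS {
			fmt.Println("selected", m.Digest)
			return
		}
	}
	fmt.Println("no match for", runtime.GOOS, runtime.GOARCH)
}
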
-func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) { - m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture) - if err != nil { - return nil, err - } - return &manifestSchema1{m: m}, nil -} - -func (m *manifestSchema1) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestSchema1) manifestMIMEType() string { - return manifest.DockerV2Schema1SignedMediaType -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestSchema1) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) { - return nil, nil -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - v2s2, err := m.convertToManifestSchema2(nil, nil) - if err != nil { - return nil, err - } - return v2s2.OCIConfig(ctx) -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestSchema1) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - // This is a bit convoluted: We can’t just have a "get embedded docker reference" method - // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually - // embed a full docker/distribution reference, but only the repo name and tag (without the host name). - // So we would have to provide a “return repo without host name, and tag” getter for the generic code, - // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the - // generic copy code needs to know about is reference.Named and that a manifest may need updating - // for some destinations. - name := reference.Path(ref) - var tag string - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - tag = tagged.Tag() - } else { - tag = "" - } - return m.m.Name != name || m.m.Tag != tag -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
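
EmbeddedDockerReferenceConflicts above boils down to comparing the repo path and tag stored in the schema1 manifest against the destination reference. A toy sketch of that comparison (values are hypothetical, and the string splitting only loosely mirrors reference.Path and reference.NamedTagged):

package main

import (
	"fmt"
	"strings"
)

func main() {
	manifestName, manifestTag := "library/busybox", "latest" // embedded in the schema1 manifest

	dest := "docker.io/library/busybox:1.36"
	// Strip the domain and split off the tag.
	path := dest[strings.Index(dest, "/")+1:]
	destName, destTag, _ := strings.Cut(path, ":")

	conflicts := manifestName != destName || manifestTag != destTag
	fmt.Println("embedded reference conflicts:", conflicts) // true (tags differ)
}
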
-func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) { - return m.m.Inspect(nil) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest) -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - if options.EmbeddedDockerReference != nil { - copy.m.Name = reference.Path(options.EmbeddedDockerReference) - if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { - copy.m.Tag = tagged.Tag() - } else { - copy.m.Tag = "" - } - } - - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, - // handle conversions between them by doing nothing. - case manifest.DockerV2Schema2MediaType: - m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) - if err != nil { - return nil, err - } - return memoryImageFromManifest(m2), nil - case imgspecv1.MediaTypeImageManifest: - // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest - m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) - if err != nil { - return nil, err - } - return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{ - ManifestMIMEType: imgspecv1.MediaTypeImageManifest, - InformationOnly: options.InformationOnly, - }) - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType) - } - - return memoryImageFromManifest(©), nil -} - -// Based on github.com/docker/docker/distribution/pull_v2.go -func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) { - if len(m.m.ExtractedV1Compatibility) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. 
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) - } - if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { - return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) - } - if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { - return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) - } - if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { - return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) - } - - // Build a list of the diffIDs for the non-empty layers. - diffIDs := []digest.Digest{} - var layers []manifest.Schema2Descriptor - for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { - v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index - - if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { - var size int64 - if uploadedLayerInfos != nil { - size = uploadedLayerInfos[v2Index].Size - } - var d digest.Digest - if layerDiffIDs != nil { - d = layerDiffIDs[v2Index] - } - layers = append(layers, manifest.Schema2Descriptor{ - MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Size: size, - Digest: m.m.FSLayers[v1Index].BlobSum, - }) - diffIDs = append(diffIDs, d) - } - } - configJSON, err := m.m.ToSchema2Config(diffIDs) - if err != nil { - return nil, err - } - configDescriptor := manifest.Schema2Descriptor{ - MediaType: "application/vnd.docker.container.image.v1+json", - Size: int64(len(configJSON)), - Digest: digest.FromBytes(configJSON), - } - - return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil -} diff --git a/vendor/github.com/containers/image/v4/image/docker_schema2.go b/vendor/github.com/containers/image/v4/image/docker_schema2.go deleted file mode 100644 index 9841bbd42..000000000 --- a/vendor/github.com/containers/image/v4/image/docker_schema2.go +++ /dev/null @@ -1,357 +0,0 @@ -package image - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/pkg/blobinfocache/none" - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) -// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is -// a non-zero embedded timestamp; we could zero that, but that would just waste storage space -// in registries, so let’s use the same values. -var GzippedEmptyLayer = []byte{ - 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, - 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, -} - -// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer -const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - -type manifestSchema2 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. 
- m *manifest.Schema2 -} - -func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { - m, err := manifest.Schema2FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestSchema2{ - src: src, - m: m, - }, nil -} - -// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: -func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest { - return &manifestSchema2{ - src: src, - configBlob: configBlob, - m: manifest.Schema2FromComponents(config, layers), - } -} - -func (m *manifestSchema2) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestSchema2) manifestMIMEType() string { - return m.m.MediaType -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestSchema2) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - configBlob, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields - // than OCI v1. This unmarshal makes sure we drop docker v2s2 - // fields that aren't needed in OCI v1. - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(configBlob, configOCI); err != nil { - return nil, err - } - return configOCI, nil -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { - if m.configBlob == nil { - if m.src == nil { - return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") - } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) - if err != nil { - return nil, err - } - defer stream.Close() - blob, err := ioutil.ReadAll(stream) - if err != nil { - return nil, err - } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) - } - m.configBlob = blob - } - return m.configBlob, nil -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestSchema2) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. 
-// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - return false -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { - getter := func(info types.BlobInfo) ([]byte, error) { - if info.Digest != m.ConfigInfo().Digest { - // Shouldn't ever happen - return nil, errors.New("asked for a different config blob") - } - config, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - return config, nil - } - return m.m.Inspect(getter) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return false -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. - src: m.src, - configBlob: m.configBlob, - m: manifest.Schema2Clone(m.m), - } - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. 
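(For orientation while reading the deleted code: callers drive the MIME-type switch below through types.Image.UpdatedImage. A minimal sketch, assuming img is a hypothetical types.Image backed by this schema2 manifest; none of these lines are part of the vendored file:)

    // Request an in-memory conversion of the schema2 image to an OCI manifest;
    // the imgspecv1.MediaTypeImageManifest case below calls convertToManifestOCI1.
    updated, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{
        ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
    })
    if err != nil {
        return err
    }
    blob, mimeType, err := updated.Manifest(ctx) // serialized OCI manifest and its MIME type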
- - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType: - return copy.convertToManifestSchema1(ctx, options.InformationOnly.Destination) - case imgspecv1.MediaTypeImageManifest: - return copy.convertToManifestOCI1(ctx) - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType) - } - - return memoryImageFromManifest(&copy), nil -} - -func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { - return imgspecv1.Descriptor{ - MediaType: d.MediaType, - Size: d.Size, - Digest: d.Digest, - URLs: d.URLs, - } -} - -func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context) (types.Image, error) { - configOCI, err := m.OCIConfig(ctx) - if err != nil { - return nil, err - } - configOCIBytes, err := json.Marshal(configOCI) - if err != nil { - return nil, err - } - - config := imgspecv1.Descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), - } - - layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) - for idx := range layers { - layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) - switch m.m.LayersDescriptors[idx].MediaType { - case manifest.DockerV2Schema2ForeignLayerMediaType: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable - case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip - case manifest.DockerV2SchemaLayerMediaTypeUncompressed: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayer - case manifest.DockerV2Schema2LayerMediaType: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip - default: - return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType) - } - } - - m1 := manifestOCI1FromComponents(config, m.src, configOCIBytes, layers) - return memoryImageFromManifest(m1), nil -} - -// Based on docker/distribution/manifest/schema1/config_builder.go -func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest types.ImageDestination) (types.Image, error) { - configBytes, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - imageConfig := &manifest.Schema2Image{} - if err := json.Unmarshal(configBytes, imageConfig); err != nil { - return nil, err - } - - // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. - fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) - history := make([]manifest.Schema1History, len(imageConfig.History)) - nonemptyLayerIndex := 0 - var parentV1ID string // Set in the loop - v1ID := "" - haveGzippedEmptyLayer := false - if len(imageConfig.History) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) - } - for v2Index, historyEntry := range imageConfig.History { - parentV1ID = v1ID - v1Index := len(imageConfig.History) - 1 - v2Index - - var blobDigest digest.Digest - if historyEntry.EmptyLayer { - if !haveGzippedEmptyLayer { - logrus.Debugf("Uploading empty layer during conversion to schema 1") - // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, - // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. - info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false) - if err != nil { - return nil, errors.Wrap(err, "Error uploading empty layer") - } - if info.Digest != GzippedEmptyLayerDigest { - return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, GzippedEmptyLayerDigest) - } - haveGzippedEmptyLayer = true - } - blobDigest = GzippedEmptyLayerDigest - } else { - if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { - return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) - } - blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest - nonemptyLayerIndex++ - } - - // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. - v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) - if err != nil { - return nil, err - } - v1ID = v - - fakeImage := manifest.Schema1V1Compatibility{ - ID: v1ID, - Parent: parentV1ID, - Comment: historyEntry.Comment, - Created: historyEntry.Created, - Author: historyEntry.Author, - ThrowAway: historyEntry.EmptyLayer, - } - fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} - v1CompatibilityBytes, err := json.Marshal(&fakeImage) - if err != nil { - return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) - } - - fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} - history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} - // Note that parentV1ID of the top layer is preserved when exiting this loop - } - - // Now patch in real configuration for the top layer (v1Index == 0) - v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. - if err != nil { - return nil, err - } - v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) - if err != nil { - return nil, err - } - history[0].V1Compatibility = string(v1Config) - - m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) - if err != nil { - return nil, err // This should never happen, we should have created all the components correctly. - } - return memoryImageFromManifest(m1), nil -} - -func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { - if err := blobDigest.Validate(); err != nil { - return "", err - } - parts := append([]string{blobDigest.Hex()}, others...) 
- v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) - return hex.EncodeToString(v1IDHash[:]), nil -} - -func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Preserve everything we don't specifically know about. - // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) - rawContents := map[string]*json.RawMessage{} - if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! - return nil, err - } - delete(rawContents, "rootfs") - delete(rawContents, "history") - - updates := map[string]interface{}{"id": v1ID} - if parentV1ID != "" { - updates["parent"] = parentV1ID - } - if throwaway { - updates["throwaway"] = throwaway - } - for field, value := range updates { - encoded, err := json.Marshal(value) - if err != nil { - return nil, err - } - rawContents[field] = (*json.RawMessage)(&encoded) - } - return json.Marshal(rawContents) -} diff --git a/vendor/github.com/containers/image/v4/image/manifest.go b/vendor/github.com/containers/image/v4/image/manifest.go deleted file mode 100644 index f384d2fb8..000000000 --- a/vendor/github.com/containers/image/v4/image/manifest.go +++ /dev/null @@ -1,73 +0,0 @@ -package image - -import ( - "context" - "fmt" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/types" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// genericManifest is an interface for parsing, modifying image manifests and related data. -// Note that the public methods are intended to be a subset of types.Image -// so that embedding a genericManifest into structs works. -// will support v1 one day... -type genericManifest interface { - serialize() ([]byte, error) - manifestMIMEType() string - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. - ConfigInfo() types.BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. - // The result is cached; it is OK to call this however often you need. - ConfigBlob(context.Context) ([]byte, error) - // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about - // layers in the resulting configuration isn't guaranteed to be returned to due how - // old image manifests work (docker v2s1 especially). - OCIConfig(context.Context) (*imgspecv1.Image, error) - // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []types.BlobInfo - // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. - // It returns false if the manifest does not embed a Docker reference. - // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) 
- EmbeddedDockerReferenceConflicts(ref reference.Named) bool - // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. - Inspect(context.Context) (*types.ImageInspectInfo, error) - // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. - // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute - // (most importantly it forces us to download the full layers even if they are already present at the destination). - UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool - // UpdatedImage returns a types.Image modified according to options. - // This does not change the state of the original Image object. - UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) -} - -// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. -// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. -func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { - switch manifest.NormalizedMIMEType(mt) { - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - return manifestSchema1FromManifest(manblob) - case imgspecv1.MediaTypeImageManifest: - return manifestOCI1FromManifest(src, manblob) - case manifest.DockerV2Schema2MediaType: - return manifestSchema2FromManifest(src, manblob) - case manifest.DockerV2ListMediaType: - return manifestSchema2FromManifestList(ctx, sys, src, manblob) - default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) - } -} - -// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo. -func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo { - blobs := make([]types.BlobInfo, len(layers)) - for i, layer := range layers { - blobs[i] = layer.BlobInfo - } - return blobs -} diff --git a/vendor/github.com/containers/image/v4/image/memory.go b/vendor/github.com/containers/image/v4/image/memory.go deleted file mode 100644 index 255965e14..000000000 --- a/vendor/github.com/containers/image/v4/image/memory.go +++ /dev/null @@ -1,65 +0,0 @@ -package image - -import ( - "context" - - "github.com/pkg/errors" - - "github.com/containers/image/v4/types" -) - -// memoryImage is a mostly-implementation of types.Image assembled from data -// created in memory, used primarily as a return value of types.Image.UpdatedImage -// as a way to carry various structured information in a type-safe and easy-to-use way. -// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone -// collection of all related information, e.g. there is no way to get layer blobs -// from a memoryImage. -type memoryImage struct { - genericManifest - serializedManifest []byte // A private cache for Manifest() -} - -func memoryImageFromManifest(m genericManifest) types.Image { - return &memoryImage{ - genericManifest: m, - serializedManifest: nil, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. 
-func (i *memoryImage) Reference() types.ImageReference { - // It would really be inappropriate to return the ImageReference of the image this was based on. - return nil -} - -// Size returns the size of the image as stored, if known, or -1 if not. -func (i *memoryImage) Size() (int64, error) { - return -1, nil -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. -func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) { - if i.serializedManifest == nil { - m, err := i.genericManifest.serialize() - if err != nil { - return nil, "", err - } - i.serializedManifest = m - } - return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { - // Modifying an image invalidates signatures; a caller asking the updated image for signatures - // is probably confused. - return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") -} - -// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/v4/image/oci.go b/vendor/github.com/containers/image/v4/image/oci.go deleted file mode 100644 index 142b0f28f..000000000 --- a/vendor/github.com/containers/image/v4/image/oci.go +++ /dev/null @@ -1,214 +0,0 @@ -package image - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/pkg/blobinfocache/none" - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type manifestOCI1 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of m.Config. - m *manifest.OCI1 -} - -func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { - m, err := manifest.OCI1FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestOCI1{ - src: src, - m: m, - }, nil -} - -// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: -func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { - return &manifestOCI1{ - src: src, - configBlob: configBlob, - m: manifest.OCI1FromComponents(config, layers), - } -} - -func (m *manifestOCI1) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestOCI1) manifestMIMEType() string { - return imgspecv1.MediaTypeImageManifest -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. 
-func (m *manifestOCI1) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { - if m.configBlob == nil { - if m.src == nil { - return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") - } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache) - if err != nil { - return nil, err - } - defer stream.Close() - blob, err := ioutil.ReadAll(stream) - if err != nil { - return nil, err - } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.Config.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) - } - m.configBlob = blob - } - return m.configBlob, nil -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - cb, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(cb, configOCI); err != nil { - return nil, err - } - return configOCI, nil -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestOCI1) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - return false -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { - getter := func(info types.BlobInfo) ([]byte, error) { - if info.Digest != m.ConfigInfo().Digest { - // Shouldn't ever happen - return nil, errors.New("asked for a different config blob") - } - config, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - return config, nil - } - return m.m.Inspect(getter) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return false -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. 
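(Note on the conversions below: the media-type rewrites are one-to-one; for example, an OCI gzipped layer, "application/vnd.oci.image.layer.v1.tar+gzip", becomes the docker schema2 layer type "application/vnd.docker.image.rootfs.diff.tar.gzip". Conversion to schema1 is transitive via schema2. A hypothetical caller sketch, not part of the vendored file; ociImg and dest are assumed names:)

    // Convert an OCI image to signed schema1; internally this goes OCI -> schema2 -> schema1,
    // so the destination must be supplied via InformationOnly for the schema1 step.
    v1img, err := ociImg.UpdatedImage(ctx, types.ManifestUpdateOptions{
        ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
        InformationOnly:  types.ManifestUpdateInformation{Destination: dest},
    })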
-func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. - src: m.src, - configBlob: m.configBlob, - m: manifest.OCI1Clone(m.m), - } - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. - - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - // We can't directly convert to V1, but we can transitively convert via a V2 image - m2, err := copy.convertToManifestSchema2() - if err != nil { - return nil, err - } - return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{ - ManifestMIMEType: options.ManifestMIMEType, - InformationOnly: options.InformationOnly, - }) - case manifest.DockerV2Schema2MediaType: - return copy.convertToManifestSchema2() - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType) - } - - return memoryImageFromManifest(&copy), nil -} - -func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor { - return manifest.Schema2Descriptor{ - MediaType: d.MediaType, - Size: d.Size, - Digest: d.Digest, - URLs: d.URLs, - } -} - -func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { - // Create a copy of the descriptor. - config := schema2DescriptorFromOCI1Descriptor(m.m.Config) - - // The only difference between OCI and DockerSchema2 is the mediatypes. The - // media type of the manifest is handled by manifestSchema2FromComponents. - config.MediaType = manifest.DockerV2Schema2ConfigMediaType - - layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) - for idx := range layers { - layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) - switch layers[idx].MediaType { - case imgspecv1.MediaTypeImageLayerNonDistributable: - layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType - case imgspecv1.MediaTypeImageLayerNonDistributableGzip: - layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip - case imgspecv1.MediaTypeImageLayerNonDistributableZstd: - return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) - case imgspecv1.MediaTypeImageLayer: - layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed - case imgspecv1.MediaTypeImageLayerGzip: - layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType - case imgspecv1.MediaTypeImageLayerZstd: - return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) - default: - return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType) - } - } - - // Rather than copying the ConfigBlob now, we just pass m.src to the - // translated manifest, since the only difference is the mediatype of - // descriptors there is no change to any blob stored in m.src.
- m1 := manifestSchema2FromComponents(config, m.src, nil, layers) - return memoryImageFromManifest(m1), nil -} diff --git a/vendor/github.com/containers/image/v4/image/sourced.go b/vendor/github.com/containers/image/v4/image/sourced.go deleted file mode 100644 index d2a3e2ee6..000000000 --- a/vendor/github.com/containers/image/v4/image/sourced.go +++ /dev/null @@ -1,104 +0,0 @@ -// Package image consolidates knowledge about various container image formats -// (as opposed to image storage mechanisms, which are handled by types.ImageSource) -// and exposes all of them using an unified interface. -package image - -import ( - "context" - - "github.com/containers/image/v4/types" -) - -// imageCloser implements types.ImageCloser, perhaps allowing simple users -// to use a single object without having keep a reference to a types.ImageSource -// only to call types.ImageSource.Close(). -type imageCloser struct { - types.Image - src types.ImageSource -} - -// FromSource returns a types.ImageCloser implementation for the default instance of source. -// If source is a manifest list, .Manifest() still returns the manifest list, -// but other methods transparently return data from an appropriate image instance. -// -// The caller must call .Close() on the returned ImageCloser. -// -// FromSource “takes ownership” of the input ImageSource and will call src.Close() -// when the image is closed. (This does not prevent callers from using both the -// Image and ImageSource objects simultaneously, but it means that they only need to -// the Image.) -// -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. -func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { - img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil)) - if err != nil { - return nil, err - } - return &imageCloser{ - Image: img, - src: src, - }, nil -} - -func (ic *imageCloser) Close() error { - return ic.src.Close() -} - -// sourcedImage is a general set of utilities for working with container images, -// whatever is their underlying location (i.e. dockerImageSource-independent). -// Note the existence of skopeo/docker.Image: some instances of a `types.Image` -// may not be a `sourcedImage` directly. However, most users of `types.Image` -// do not care, and those who care about `skopeo/docker.Image` know they do. -type sourcedImage struct { - *UnparsedImage - manifestBlob []byte - manifestMIMEType string - // genericManifest contains data corresponding to manifestBlob. - // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest - // if you want to preserve the original manifest; use manifestBlob directly. - genericManifest -} - -// FromUnparsedImage returns a types.Image implementation for unparsed. -// If unparsed represents a manifest list, .Manifest() still returns the manifest list, -// but other methods transparently return data from an appropriate single image. -// -// The Image must not be used after the underlying ImageSource is Close()d. -func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { - // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: - // we want to be able to use unparsed.src. 
We could make that an explicit interface, but, well, - // this is the only UnparsedImage implementation around, anyway. - - // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). - manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) - if err != nil { - return nil, err - } - - parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) - if err != nil { - return nil, err - } - - return &sourcedImage{ - UnparsedImage: unparsed, - manifestBlob: manifestBlob, - manifestMIMEType: manifestMIMEType, - genericManifest: parsedManifest, - }, nil -} - -// Size returns the size of the image as stored, if it's known, or -1 if it isn't. -func (i *sourcedImage) Size() (int64, error) { - return -1, nil -} - -// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. -func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) { - return i.manifestBlob, i.manifestMIMEType, nil -} - -func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return i.UnparsedImage.src.LayerInfosForCopy(ctx) -} diff --git a/vendor/github.com/containers/image/v4/image/unparsed.go b/vendor/github.com/containers/image/v4/image/unparsed.go deleted file mode 100644 index d73107654..000000000 --- a/vendor/github.com/containers/image/v4/image/unparsed.go +++ /dev/null @@ -1,95 +0,0 @@ -package image - -import ( - "context" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// UnparsedImage implements types.UnparsedImage . -// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. -type UnparsedImage struct { - src types.ImageSource - instanceDigest *digest.Digest - cachedManifest []byte // A private cache for Manifest(); nil if not yet known. - // A private cache for Manifest(), may be the empty string if guessing failed. - // Valid iff cachedManifest is not nil. - cachedManifestMIMEType string - cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. -} - -// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). -// -// The UnparsedImage must not be used after the underlying ImageSource is Close()d. -func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { - return &UnparsedImage{ - src: src, - instanceDigest: instanceDigest, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *UnparsedImage) Reference() types.ImageReference { - // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity. - return i.src.Reference() -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. 
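(The digest check inside Manifest below is what makes digest-pinned references trustworthy: if the user asked for image@sha256:…, the returned bytes must hash to exactly that digest. A minimal sketch of the same check using the manifest.MatchesDigest helper the code relies on, with a hypothetical pinned digest; not part of the vendored file:)

    pinned := digest.Digest("sha256:0000000000000000000000000000000000000000000000000000000000000000") // hypothetical
    ok, err := manifest.MatchesDigest(manifestBlob, pinned)
    if err != nil {
        return errors.Wrap(err, "Error computing manifest digest")
    }
    if !ok {
        return errors.Errorf("Manifest does not match provided manifest digest %s", pinned)
    }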
-func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) { - if i.cachedManifest == nil { - m, mt, err := i.src.GetManifest(ctx, i.instanceDigest) - if err != nil { - return nil, "", err - } - - // ImageSource.GetManifest does not do digest verification, but we do; - // this immediately protects also any user of types.Image. - if digest, haveDigest := i.expectedManifestDigest(); haveDigest { - matches, err := manifest.MatchesDigest(m, digest) - if err != nil { - return nil, "", errors.Wrap(err, "Error computing manifest digest") - } - if !matches { - return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) - } - } - - i.cachedManifest = m - i.cachedManifestMIMEType = mt - } - return i.cachedManifest, i.cachedManifestMIMEType, nil -} - -// expectedManifestDigest returns a the expected value of the manifest digest, and an indicator whether it is known. -// The bool return value seems redundant with digest != ""; it is used explicitly -// to refuse (unexpected) situations when the digest exists but is "". -func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { - if i.instanceDigest != nil { - return *i.instanceDigest, true - } - ref := i.Reference().DockerReference() - if ref != nil { - if canonical, ok := ref.(reference.Canonical); ok { - return canonical.Digest(), true - } - } - return "", false -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { - if i.cachedSignatures == nil { - sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) - if err != nil { - return nil, err - } - i.cachedSignatures = sigs - } - return i.cachedSignatures, nil -} diff --git a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v4/internal/pkg/keyctl/key.go deleted file mode 100644 index 88e123cdd..000000000 --- a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/key.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -// Key represents a single key linked to one or more kernel keyrings. -type Key struct { - Name string - - id, ring keyID - size int -} - -// ID returns the 32-bit kernel identifier for a specific key -func (k *Key) ID() int32 { - return int32(k.id) -} - -// Get the key's value as a byte slice -func (k *Key) Get() ([]byte, error) { - var ( - b []byte - err error - sizeRead int - ) - - if k.size == 0 { - k.size = 512 - } - - size := k.size - - b = make([]byte, int(size)) - sizeRead = size + 1 - for sizeRead > size { - r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size) - if err != nil { - return nil, err - } - - if sizeRead = int(r1); sizeRead > size { - b = make([]byte, sizeRead) - size = sizeRead - sizeRead = size + 1 - } else { - k.size = sizeRead - } - } - return b[:k.size], err -} - -// Unlink a key from the keyring it was loaded from (or added to). If the key -// is not linked to any other keyrings, it is destroyed. 
-func (k *Key) Unlink() error { - _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0) - return err -} - -// Describe returns a string describing the attributes of a specified key -func (k *Key) Describe() (string, error) { - keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id)) - if err != nil { - return "", err - } - return keyAttr, nil -} diff --git a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v4/internal/pkg/keyctl/keyring.go deleted file mode 100644 index 4bf170156..000000000 --- a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/keyring.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface) -// -// Deprecated: Most callers should use either golang.org/x/sys/unix directly, -// or the original (and more extensive) github.com/jsipprell/keyctl . -package keyctl - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -// Keyring is the basic interface to a linux keyctl keyring. -type Keyring interface { - ID - Add(string, []byte) (*Key, error) - Search(string) (*Key, error) -} - -type keyring struct { - id keyID -} - -// ID is unique 32-bit serial number identifiers for all Keys and Keyrings have. -type ID interface { - ID() int32 -} - -// Add a new key to a keyring. The key can be searched for later by name. -func (kr *keyring) Add(name string, key []byte) (*Key, error) { - r, err := unix.AddKey("user", name, key, int(kr.id)) - if err == nil { - key := &Key{Name: name, id: keyID(r), ring: kr.id} - return key, nil - } - return nil, err -} - -// Search for a key by name, this also searches child keyrings linked to this -// one. The key, if found, is linked to the top keyring that Search() was called -// from. -func (kr *keyring) Search(name string) (*Key, error) { - id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0) - if err == nil { - return &Key{Name: name, id: keyID(id), ring: kr.id}, nil - } - return nil, err -} - -// ID returns the 32-bit kernel identifier of a keyring -func (kr *keyring) ID() int32 { - return int32(kr.id) -} - -// SessionKeyring returns the current login session keyring -func SessionKeyring() (Keyring, error) { - return newKeyring(unix.KEY_SPEC_SESSION_KEYRING) -} - -// UserKeyring returns the keyring specific to the current user. 
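(Taken together, the helpers in this package — SessionKeyring/UserKeyring, Add, Search, and Key.Get — compose into a small store-and-retrieve flow. A sketch with a hypothetical key name and payload, not part of the vendored file:)

    kr, err := keyctl.SessionKeyring() // current login session keyring
    if err != nil {
        return err
    }
    if _, err := kr.Add("registry-auth", []byte("s3cret")); err != nil { // hypothetical name/secret
        return err
    }
    key, err := kr.Search("registry-auth") // also searches linked child keyrings
    if err != nil {
        return err
    }
    data, err := key.Get() // reads the payload back via KEYCTL_READ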
-func UserKeyring() (Keyring, error) { - return newKeyring(unix.KEY_SPEC_USER_KEYRING) -} - -// Unlink an object from a keyring -func Unlink(parent Keyring, child ID) error { - _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0) - return err -} - -// Link a key into a keyring -func Link(parent Keyring, child ID) error { - _, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0) - return err -} - -// ReadUserKeyring reads user keyring and returns slice of key with id(key_serial_t) representing the IDs of all the keys that are linked to it -func ReadUserKeyring() ([]*Key, error) { - var ( - b []byte - err error - sizeRead int - ) - krSize := 4 - size := krSize - b = make([]byte, size) - sizeRead = size + 1 - for sizeRead > size { - r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size) - if err != nil { - return nil, err - } - - if sizeRead = int(r1); sizeRead > size { - b = make([]byte, sizeRead) - size = sizeRead - sizeRead = size + 1 - } else { - krSize = sizeRead - } - } - keyIDs := getKeyIDsFromByte(b[:krSize]) - return keyIDs, err -} - -func getKeyIDsFromByte(byteKeyIDs []byte) []*Key { - idSize := 4 - var keys []*Key - for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize { - tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx])) - keys = append(keys, &Key{id: keyID(tempID)}) - } - return keys -} diff --git a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v4/internal/pkg/keyctl/perm.go deleted file mode 100644 index ae9697149..000000000 --- a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/perm.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -// KeyPerm represents in-kernel access control permission to keys and keyrings -// as a 32-bit integer broken up into four permission sets, one per byte. -// In MSB order, the perms are: Processor, User, Group, Other. -type KeyPerm uint32 - -const ( - // PermOtherAll sets all permission for Other - PermOtherAll KeyPerm = 0x3f << (8 * iota) - // PermGroupAll sets all permission for Group - PermGroupAll - // PermUserAll sets all permission for User - PermUserAll - // PermProcessAll sets all permission for Processor - PermProcessAll -) - -// SetPerm sets the permissions on a key or keyring. -func SetPerm(k ID, p KeyPerm) error { - err := unix.KeyctlSetperm(int(k.ID()), uint32(p)) - return err -} diff --git a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v4/internal/pkg/keyctl/sys_linux.go deleted file mode 100644 index 196c82760..000000000 --- a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/sys_linux.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -type keyID int32 - -func newKeyring(id keyID) (*keyring, error) { - r1, err := unix.KeyctlGetKeyringID(int(id), true) - if err != nil { - return nil, err - } - - if id < 0 { - r1 = int(id) - } - return &keyring{id: keyID(r1)}, nil -} diff --git a/vendor/github.com/containers/image/v4/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v4/internal/tmpdir/tmpdir.go deleted file mode 100644 index 8c776929c..000000000 --- a/vendor/github.com/containers/image/v4/internal/tmpdir/tmpdir.go +++ /dev/null @@ -1,29 +0,0 @@ -package tmpdir - -import ( - "os" - "runtime" -) - -// unixTempDirForBigFiles is the directory path to store big files on non Windows systems. -// You can override this at build time with -// -ldflags '-X github.com/containers/image/internal/tmpdir.unixTempDirForBigFiles=$your_path' -var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles - -// builtinUnixTempDirForBigFiles is the directory path to store big files. -// Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. -// DO NOT change this, instead see unixTempDirForBigFiles above. -const builtinUnixTempDirForBigFiles = "/var/tmp" - -// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. -// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp -// which on systemd based systems could be the unsuitable tmpfs filesystem. -func TemporaryDirectoryForBigFiles() string { - var temporaryDirectoryForBigFiles string - if runtime.GOOS == "windows" { - temporaryDirectoryForBigFiles = os.TempDir() - } else { - temporaryDirectoryForBigFiles = unixTempDirForBigFiles - } - return temporaryDirectoryForBigFiles -} diff --git a/vendor/github.com/containers/image/v4/manifest/docker_schema1.go b/vendor/github.com/containers/image/v4/manifest/docker_schema1.go deleted file mode 100644 index 3c172504a..000000000 --- a/vendor/github.com/containers/image/v4/manifest/docker_schema1.go +++ /dev/null @@ -1,316 +0,0 @@ -package manifest - -import ( - "encoding/json" - "regexp" - "strings" - "time" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/types" - "github.com/docker/docker/api/types/versions" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. -type Schema1FSLayers struct { - BlobSum digest.Digest `json:"blobSum"` -} - -// Schema1History is an entry of the "history" array in docker/distribution schema 1. -type Schema1History struct { - V1Compatibility string `json:"v1Compatibility"` -} - -// Schema1 is a manifest in docker/distribution schema 1. -type Schema1 struct { - Name string `json:"name"` - Tag string `json:"tag"` - Architecture string `json:"architecture"` - FSLayers []Schema1FSLayers `json:"fsLayers"` - History []Schema1History `json:"history"` // Keep this in sync with ExtractedV1Compatibility! - ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"` // Keep this in sync with History! Does not contain the full config (Schema2V1Image) - SchemaVersion int `json:"schemaVersion"` -} - -type schema1V1CompatibilityContainerConfig struct { - Cmd []string -} - -// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. 
-type Schema1V1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` -} - -// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. -// (NOTE: The instance is not necessary a literal representation of the original blob, -// layers with duplicate IDs are eliminated.) -func Schema1FromManifest(manifest []byte) (*Schema1, error) { - s1 := Schema1{} - if err := json.Unmarshal(manifest, &s1); err != nil { - return nil, err - } - if s1.SchemaVersion != 1 { - return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion) - } - if err := s1.initialize(); err != nil { - return nil, err - } - if err := s1.fixManifestLayers(); err != nil { - return nil, err - } - return &s1, nil -} - -// Schema1FromComponents creates an Schema1 manifest instance from the supplied data. -func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) { - var name, tag string - if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. - name = reference.Path(ref) - if tagged, ok := ref.(reference.NamedTagged); ok { - tag = tagged.Tag() - } - } - s1 := Schema1{ - Name: name, - Tag: tag, - Architecture: architecture, - FSLayers: fsLayers, - History: history, - SchemaVersion: 1, - } - if err := s1.initialize(); err != nil { - return nil, err - } - return &s1, nil -} - -// Schema1Clone creates a copy of the supplied Schema1 manifest. -func Schema1Clone(src *Schema1) *Schema1 { - copy := *src - return &copy -} - -// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest. -func (m *Schema1) initialize() error { - if len(m.FSLayers) != len(m.History) { - return errors.New("length of history not equal to number of layers") - } - if len(m.FSLayers) == 0 { - return errors.New("no FSLayers in manifest") - } - m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History)) - for i, h := range m.History { - if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil { - return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i) - } - } - return nil -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -func (m *Schema1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{} -} - -// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant.
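(Ordering note for the function below: schema 1 stores fsLayers newest-first, while LayerInfos callers expect root-first, hence the index reversal. With a hypothetical two-layer manifest, not part of the vendored file, the mapping is:)

    // m.FSLayers[0] = top layer, m.FSLayers[1] = base layer (schema 1 order)
    // LayerInfos()[0] = base (from FSLayers[1]), EmptyLayer from ExtractedV1Compatibility[1].ThrowAway
    // LayerInfos()[1] = top  (from FSLayers[0]), EmptyLayer from ExtractedV1Compatibility[0].ThrowAway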
-func (m *Schema1) LayerInfos() []LayerInfo { - layers := make([]LayerInfo, len(m.FSLayers)) - for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = LayerInfo{ - BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1}, - EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway, - } - } - return layers -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well. - if len(m.FSLayers) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) - } - m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) - for i, info := range layerInfos { - // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, - // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. - // So, we don't bother recomputing the IDs in m.History.V1Compatibility. - m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *Schema1) Serialize() ([]byte, error) { - // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. - unsigned, err := json.Marshal(*m) - if err != nil { - return nil, err - } - return AddDummyV2S1Signature(unsigned) -} - -// fixManifestLayers, after validating the supplied manifest -// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), -// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, -// both from m.History and m.FSLayers). -// Note that even after this succeeds, m.FSLayers may contain duplicate entries -// (for Dockerfile operations which change the configuration but not the filesystem). -func (m *Schema1) fixManifestLayers() error { - // m.initialize() has verified that len(m.FSLayers) == len(m.History) - for _, compat := range m.ExtractedV1Compatibility { - if err := validateV1ID(compat.ID); err != nil { - return err - } - } - if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" { - return errors.New("Invalid parent ID in the base layer of the image") - } - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - var lastID string - for _, img := range m.ExtractedV1Compatibility { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - // backwards loop so that we keep the remaining indexes after removing items - for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { - if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. 
remove and continue - m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) - m.History = append(m.History[:i], m.History[i+1:]...) - m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) - } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { - return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) - } - } - return nil -} - -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) - -func validateV1ID(id string) error { - if ok := validHex.MatchString(id); !ok { - return errors.Errorf("image ID %q is invalid", id) - } - return nil -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - s1 := &Schema2V1Image{} - if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { - return nil, err - } - i := &types.ImageInspectInfo{ - Tag: m.Tag, - Created: &s1.Created, - DockerVersion: s1.DockerVersion, - Architecture: s1.Architecture, - Os: s1.OS, - Layers: layerInfosToStrings(m.LayerInfos()), - } - if s1.Config != nil { - i.Labels = s1.Config.Labels - i.Env = s1.Config.Env - } - return i, nil -} - -// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs. -func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { - // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields - // that aren't directly comparable using info from the manifest. - if len(m.History) == 0 { - return nil, errors.New("image has no layers") - } - s1 := Schema2V1Image{} - config := []byte(m.History[0].V1Compatibility) - err := json.Unmarshal(config, &s1) - if err != nil { - return nil, errors.Wrapf(err, "error decoding configuration") - } - // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, - // adding some fields that aren't "omitempty". - if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { - config, err = json.Marshal(&s1) - if err != nil { - return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s1) - } - } - // Build the history. - convertedHistory := []Schema2History{} - for _, compat := range m.ExtractedV1Compatibility { - hitem := Schema2History{ - Created: compat.Created, - CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), - Author: compat.Author, - Comment: compat.Comment, - EmptyLayer: compat.ThrowAway, - } - convertedHistory = append([]Schema2History{hitem}, convertedHistory...) - } - // Build the rootfs information. We need the decompressed sums that we've been - // calculating to fill in the DiffIDs. It's expected (but not enforced by us) - // that the number of diffIDs corresponds to the number of non-EmptyLayer - // entries in the history. - rootFS := &Schema2RootFS{ - Type: "layers", - DiffIDs: diffIDs, - } - // And now for some raw manipulation. - raw := make(map[string]*json.RawMessage) - err = json.Unmarshal(config, &raw) - if err != nil { - return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1) - } - // Drop some fields. - delete(raw, "id") - delete(raw, "parent") - delete(raw, "parent_id") - delete(raw, "layer_id") - delete(raw, "throwaway") - delete(raw, "Size") - // Add the history and rootfs information. 
- rootfs, err := json.Marshal(rootFS) - if err != nil { - return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) - } - rawRootfs := json.RawMessage(rootfs) - raw["rootfs"] = &rawRootfs - history, err := json.Marshal(convertedHistory) - if err != nil { - return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) - } - rawHistory := json.RawMessage(history) - raw["history"] = &rawHistory - // Encode the result. - config, err = json.Marshal(raw) - if err != nil { - return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err) - } - return config, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. -func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { - image, err := m.ToSchema2Config(diffIDs) - if err != nil { - return "", err - } - return digest.FromBytes(image).Hex(), nil -} diff --git a/vendor/github.com/containers/image/v4/manifest/docker_schema2.go b/vendor/github.com/containers/image/v4/manifest/docker_schema2.go deleted file mode 100644 index 84b189c8e..000000000 --- a/vendor/github.com/containers/image/v4/manifest/docker_schema2.go +++ /dev/null @@ -1,349 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" - "time" - - "github.com/containers/image/v4/pkg/compression" - "github.com/containers/image/v4/pkg/strslice" - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. -type Schema2Descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` - URLs []string `json:"urls,omitempty"` -} - -// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. -func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { - return types.BlobInfo{ - Digest: desc.Digest, - Size: desc.Size, - URLs: desc.URLs, - MediaType: desc.MediaType, - } -} - -// Schema2 is a manifest in docker/distribution schema 2. -type Schema2 struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - ConfigDescriptor Schema2Descriptor `json:"config"` - LayersDescriptors []Schema2Descriptor `json:"layers"` -} - -// Schema2Port is a Port, a string containing port number and protocol in the -// format "80/tcp", from docker/go-connections/nat. -type Schema2Port string - -// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from -// docker/go-connections/nat. -type Schema2PortSet map[Schema2Port]struct{} - -// Schema2HealthConfig is a HealthConfig, which holds configuration settings -// for the HEALTHCHECK feature, from docker/docker/api/types/container. -type Schema2HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. 
- Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Schema2Config is a Config in docker/docker/api/types/container. -type Schema2Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - -// Schema2V1Image is a V1Image in docker/docker/image. 
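The HealthConfig above encodes Dockerfile HEALTHCHECK settings, with durations marshaled as integer nanoseconds. As a sketch (field values assumed for illustration), the shell-form instruction `HEALTHCHECK --interval=30s --timeout=3s --retries=3 CMD curl -f http://localhost/` would correspond to:

    // assumes: import "time" and this vendored manifest package
    hc := manifest.Schema2HealthConfig{
        Test:     []string{"CMD-SHELL", "curl -f http://localhost/"}, // shell form runs via the system's default shell
        Interval: 30 * time.Second, // serialized as 30000000000, i.e. integer nanoseconds
        Timeout:  3 * time.Second,
        Retries:  3, // consecutive failures before the container counts as unhealthy
    }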
-type Schema2V1Image struct { - // ID is a unique 64 character identifier of the image - ID string `json:"id,omitempty"` - // Parent is the ID of the parent image - Parent string `json:"parent,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Container is the id of the container used to commit - Container string `json:"container,omitempty"` - // ContainerConfig is the configuration of the container that is committed into the image - ContainerConfig Schema2Config `json:"container_config,omitempty"` - // DockerVersion specifies the version of Docker that was used to build the image - DockerVersion string `json:"docker_version,omitempty"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *Schema2Config `json:"config,omitempty"` - // Architecture is the hardware that the image is build and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` - // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` -} - -// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image. -type Schema2RootFS struct { - Type string `json:"type"` - DiffIDs []digest.Digest `json:"diff_ids,omitempty"` -} - -// Schema2History stores build commands that were used to create an image, from docker/docker/image. -type Schema2History struct { - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building the image - CreatedBy string `json:"created_by,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Schema2Image is an Image in docker/docker/image. -type Schema2Image struct { - Schema2V1Image - Parent digest.Digest `json:"parent,omitempty"` - RootFS *Schema2RootFS `json:"rootfs,omitempty"` - History []Schema2History `json:"history,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` -} - -// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. -func Schema2FromManifest(manifest []byte) (*Schema2, error) { - s2 := Schema2{} - if err := json.Unmarshal(manifest, &s2); err != nil { - return nil, err - } - // Check manifest's and layers' media types. - if err := SupportedSchema2MediaType(s2.MediaType); err != nil { - return nil, err - } - for _, layer := range s2.LayersDescriptors { - if err := SupportedSchema2MediaType(layer.MediaType); err != nil { - return nil, err - } - } - return &s2, nil -} - -// Schema2FromComponents creates an Schema2 manifest instance from the supplied data. 
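A sketch of feeding the constructor whose body follows; `configDigest`, `layerDigest`, and the sizes are assumed to have been computed by the caller:

    m := manifest.Schema2FromComponents(
        manifest.Schema2Descriptor{
            MediaType: manifest.DockerV2Schema2ConfigMediaType,
            Size:      configSize,   // assumed
            Digest:    configDigest, // assumed
        },
        []manifest.Schema2Descriptor{{
            MediaType: manifest.DockerV2Schema2LayerMediaType, // a gzip-compressed layer
            Size:      layerSize,   // assumed
            Digest:    layerDigest, // assumed
        }},
    )
    blob, err := m.Serialize() // plain json.Marshal; schema 2 needs no JWS signing, unlike v2s1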
-func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 { - return &Schema2{ - SchemaVersion: 2, - MediaType: DockerV2Schema2MediaType, - ConfigDescriptor: config, - LayersDescriptors: layers, - } -} - -// Schema2Clone creates a copy of the supplied Schema2 manifest. -func Schema2Clone(src *Schema2) *Schema2 { - copy := *src - return © -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -func (m *Schema2) ConfigInfo() types.BlobInfo { - return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor) -} - -// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *Schema2) LayerInfos() []LayerInfo { - blobs := []LayerInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, LayerInfo{ - BlobInfo: BlobInfoFromSchema2Descriptor(layer), - EmptyLayer: false, - }) - } - return blobs -} - -// isSchema2ForeignLayer is a convenience wrapper to check if a given mime type -// is a compressed or decompressed schema 2 foreign layer. -func isSchema2ForeignLayer(mimeType string) bool { - switch mimeType { - case DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip: - return true - default: - return false - } -} - -// isSchema2Layer is a convenience wrapper to check if a given mime type is a -// compressed or decompressed schema 2 layer. -func isSchema2Layer(mimeType string) bool { - switch mimeType { - case DockerV2SchemaLayerMediaTypeUncompressed, DockerV2Schema2LayerMediaType: - return true - default: - return false - } -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - if len(m.LayersDescriptors) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) - } - original := m.LayersDescriptors - m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos)) - for i, info := range layerInfos { - // First make sure we support the media type of the original layer. - if err := SupportedSchema2MediaType(original[i].MediaType); err != nil { - return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType) - } - - // Set the correct media types based on the specified compression - // operation, the desired compression algorithm AND the original media - // type. - // - // Note that manifests in containers-storage might be reporting the - // wrong media type since the original manifests are stored while layers - // are decompressed in storage. Hence, we need to consider the case - // that an already {de}compressed layer should be {de}compressed, which - // is being addressed in `isSchema2{Foreign}Layer`. - switch info.CompressionOperation { - case types.PreserveOriginal: - // Keep the original media type. - m.LayersDescriptors[i].MediaType = original[i].MediaType - - case types.Decompress: - // Decompress the original media type and check if it was - // non-distributable one or not. 
- mimeType := original[i].MediaType - switch { - case isSchema2ForeignLayer(mimeType): - m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaType - case isSchema2Layer(mimeType): - m.LayersDescriptors[i].MediaType = DockerV2SchemaLayerMediaTypeUncompressed - default: - return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType) - } - - case types.Compress: - if info.CompressionAlgorithm == nil { - logrus.Debugf("Preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest) - m.LayersDescriptors[i].MediaType = original[i].MediaType - break - } - // Compress the original media type and set the new one based on - // that type (distributable or not) and the specified compression - // algorithm. Throw an error if the algorithm is not supported. - switch info.CompressionAlgorithm.Name() { - case compression.Gzip.Name(): - mimeType := original[i].MediaType - switch { - case isSchema2ForeignLayer(mimeType): - m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaTypeGzip - case isSchema2Layer(mimeType): - m.LayersDescriptors[i].MediaType = DockerV2Schema2LayerMediaType - default: - return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType) - } - case compression.Zstd.Name(): - return fmt.Errorf("Error preparing updated manifest: zstd compression is not supported for docker images") - default: - return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest) - } - - default: - return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest) - } - m.LayersDescriptors[i].Digest = info.Digest - m.LayersDescriptors[i].Size = info.Size - m.LayersDescriptors[i].URLs = info.URLs - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *Schema2) Serialize() ([]byte, error) { - return json.Marshal(*m) -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - config, err := configGetter(m.ConfigInfo()) - if err != nil { - return nil, err - } - s2 := &Schema2Image{} - if err := json.Unmarshal(config, s2); err != nil { - return nil, err - } - i := &types.ImageInspectInfo{ - Tag: "", - Created: &s2.Created, - DockerVersion: s2.DockerVersion, - Architecture: s2.Architecture, - Os: s2.OS, - Layers: layerInfosToStrings(m.LayerInfos()), - } - if s2.Config != nil { - i.Labels = s2.Config.Labels - i.Env = s2.Config.Env - } - return i, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. 
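A sketch of driving the compression switch above: recompress every layer as gzip and record the new blobs. It assumes this package's pkg/compression and types imports, an `m` of type *Schema2 parsed earlier, and a copy pipeline that produced the new digests and sizes:

    // imports assumed: "github.com/containers/image/v4/pkg/compression",
    //                  "github.com/containers/image/v4/types"
    infos := m.LayerInfos()
    updated := make([]types.BlobInfo, len(infos))
    for i, info := range infos {
        updated[i] = info.BlobInfo
        updated[i].CompressionOperation = types.Compress
        updated[i].CompressionAlgorithm = &compression.Gzip // zstd would be rejected for docker images, per the switch above
        // The recompressed blob's Digest and Size would be filled in here (assumed to come from the copy pipeline).
    }
    if err := m.UpdateLayerInfos(updated); err != nil {
        return err
    }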
-func (m *Schema2) ImageID([]digest.Digest) (string, error) { - if err := m.ConfigDescriptor.Digest.Validate(); err != nil { - return "", err - } - return m.ConfigDescriptor.Digest.Hex(), nil -} diff --git a/vendor/github.com/containers/image/v4/manifest/manifest.go b/vendor/github.com/containers/image/v4/manifest/manifest.go deleted file mode 100644 index 32af97ea8..000000000 --- a/vendor/github.com/containers/image/v4/manifest/manifest.go +++ /dev/null @@ -1,257 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" - - "github.com/containers/image/v4/types" - "github.com/containers/libtrust" - digest "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// FIXME: Should we just use docker/distribution and docker/docker implementations directly? - -// FIXME(runcom, mitr): should we have a mediatype pkg?? -const ( - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 - DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json" - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature - DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" - // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 - DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json" - // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs. - DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json" - // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers. - DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" - // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers. - DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar" - // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list - DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json" - // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers. - DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar" - // DockerV2Schema2ForeignLayerMediaType is the MIME type used for gzippped schema 2 foreign layers. - DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" -) - -// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type. -func SupportedSchema2MediaType(m string) error { - switch m { - case DockerV2ListMediaType, DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema2ConfigMediaType, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2LayerMediaType, DockerV2Schema2MediaType, DockerV2SchemaLayerMediaTypeUncompressed: - return nil - default: - return fmt.Errorf("unsupported docker v2s2 media type: %q", m) - } -} - -// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource -// should request from the backend unless directed otherwise. -var DefaultRequestedManifestMIMETypes = []string{ - imgspecv1.MediaTypeImageManifest, - DockerV2Schema2MediaType, - DockerV2Schema1SignedMediaType, - DockerV2Schema1MediaType, - DockerV2ListMediaType, -} - -// Manifest is an interface for parsing, modifying image manifests in isolation. 
-// Callers can either use this abstract interface without understanding the details of the formats, -// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members -// directly. -// -// See types.Image for functionality not limited to manifests, including format conversions and config parsing. -// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. -type Manifest interface { - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - ConfigInfo() types.BlobInfo - // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []LayerInfo - // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) - UpdateLayerInfos(layerInfos []types.BlobInfo) error - - // ImageID computes an ID which can uniquely identify this image by its contents, irrespective - // of which (of possibly more than one simultaneously valid) reference was used to locate the - // image, and unchanged by whether or how the layers are compressed. The result takes the form - // of the hexadecimal portion of a digest.Digest. - ImageID(diffIDs []digest.Digest) (string, error) - - // Inspect returns various information for (skopeo inspect) parsed from the manifest, - // incorporating information from a configuration blob returned by configGetter, if - // the underlying image format is expected to include a configuration blob. - Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) - - // Serialize returns the manifest in a blob format. - // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! - Serialize() ([]byte, error) -} - -// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos. -type LayerInfo struct { - types.BlobInfo - EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. -} - -// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. -// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, -// but we may not have such metadata available (e.g. when the manifest is a local file). -func GuessMIMEType(manifest []byte) string { - // A subset of manifest fields; the rest is silently ignored by json.Unmarshal. - // Also docker/distribution/manifest.Versioned. - meta := struct { - MediaType string `json:"mediaType"` - SchemaVersion int `json:"schemaVersion"` - Signatures interface{} `json:"signatures"` - }{} - if err := json.Unmarshal(manifest, &meta); err != nil { - return "" - } - - switch meta.MediaType { - case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type. - return meta.MediaType - } - // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest. 
- switch meta.SchemaVersion { - case 1: - if meta.Signatures != nil { - return DockerV2Schema1SignedMediaType - } - return DockerV2Schema1MediaType - case 2: - // best effort to understand if this is an OCI image since mediaType - // isn't in the manifest for OCI anymore - // for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. - ociMan := struct { - Config struct { - MediaType string `json:"mediaType"` - } `json:"config"` - }{} - if err := json.Unmarshal(manifest, &ociMan); err != nil { - return "" - } - if ociMan.Config.MediaType == imgspecv1.MediaTypeImageConfig { - return imgspecv1.MediaTypeImageManifest - } - ociIndex := struct { - Manifests []imgspecv1.Descriptor `json:"manifests"` - }{} - if err := json.Unmarshal(manifest, &ociIndex); err != nil { - return "" - } - if len(ociIndex.Manifests) != 0 && ociIndex.Manifests[0].MediaType == imgspecv1.MediaTypeImageManifest { - return imgspecv1.MediaTypeImageIndex - } - return DockerV2Schema2MediaType - } - return "" -} - -// Digest returns the a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures. -func Digest(manifest []byte) (digest.Digest, error) { - if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType { - sig, err := libtrust.ParsePrettySignature(manifest, "signatures") - if err != nil { - return "", err - } - manifest, err = sig.Payload() - if err != nil { - // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string - // that libtrust itself has josebase64UrlEncode()d - return "", err - } - } - - return digest.FromBytes(manifest), nil -} - -// MatchesDigest returns true iff the manifest matches expectedDigest. -// Error may be set if this returns false. -// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, -// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. -func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) { - // This should eventually support various digest types. - actualDigest, err := Digest(manifest) - if err != nil { - return false, err - } - return expectedDigest == actualDigest, nil -} - -// AddDummyV2S1Signature adds an JWS signature with a temporary key (i.e. useless) to a v2s1 manifest. -// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature). -func AddDummyV2S1Signature(manifest []byte) ([]byte, error) { - key, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, err // Coverage: This can fail only if rand.Reader fails. - } - - js, err := libtrust.NewJSONSignature(manifest) - if err != nil { - return nil, err - } - if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails. - return nil, err - } - return js.PrettySignature("signatures") -} - -// MIMETypeIsMultiImage returns true if mimeType is a list of images -func MIMETypeIsMultiImage(mimeType string) bool { - return mimeType == DockerV2ListMediaType -} - -// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, -// centralizing various workarounds. -func NormalizedMIMEType(input string) string { - switch input { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . 
- // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource. - case "application/json": - return DockerV2Schema1SignedMediaType - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, - imgspecv1.MediaTypeImageManifest, - DockerV2Schema2MediaType, - DockerV2ListMediaType: - return input - default: - // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time - // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 - // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 - // - // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 - return DockerV2Schema1SignedMediaType - } -} - -// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type -func FromBlob(manblob []byte, mt string) (Manifest, error) { - switch NormalizedMIMEType(mt) { - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: - return Schema1FromManifest(manblob) - case imgspecv1.MediaTypeImageManifest: - return OCI1FromManifest(manblob) - case DockerV2Schema2MediaType: - return Schema2FromManifest(manblob) - case DockerV2ListMediaType: - return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") - default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) - } -} - -// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() -// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. -func layerInfosToStrings(infos []LayerInfo) []string { - layers := make([]string, len(infos)) - for i, info := range infos { - layers[i] = info.Digest.String() - } - return layers -} diff --git a/vendor/github.com/containers/image/v4/manifest/oci.go b/vendor/github.com/containers/image/v4/manifest/oci.go deleted file mode 100644 index e483bbb19..000000000 --- a/vendor/github.com/containers/image/v4/manifest/oci.go +++ /dev/null @@ -1,243 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" - - "github.com/containers/image/v4/pkg/compression" - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. -func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo { - return types.BlobInfo{ - Digest: desc.Digest, - Size: desc.Size, - URLs: desc.URLs, - Annotations: desc.Annotations, - MediaType: desc.MediaType, - } -} - -// OCI1 is a manifest.Manifest implementation for OCI images. -// The underlying data from imgspecv1.Manifest is also available. -type OCI1 struct { - imgspecv1.Manifest -} - -// SupportedOCI1MediaType checks if the specified string is a supported OCI1 media type. 
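Before the OCI1 counterpart below, a sketch of how the manifest.go helpers deleted above compose end to end; `raw` is an assumed manifest blob and the import path is this vendored v4 copy:

    // imports assumed: "fmt", "github.com/containers/image/v4/manifest"
    mt := manifest.GuessMIMEType(raw)    // "" when unrecognized
    dgst, err := manifest.Digest(raw)    // strips v2s1 JWS signatures before digesting
    if err != nil {
        return err
    }
    m, err := manifest.FromBlob(raw, mt) // yields a Schema1, Schema2, or OCI1; manifest lists are refused
    if err != nil {
        return err
    }
    fmt.Println(dgst, m.ConfigInfo().Digest, len(m.LayerInfos()))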
-func SupportedOCI1MediaType(m string) error { - switch m { - case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader: - return nil - default: - return fmt.Errorf("unsupported OCIv1 media type: %q", m) - } -} - -// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. -func OCI1FromManifest(manifest []byte) (*OCI1, error) { - oci1 := OCI1{} - if err := json.Unmarshal(manifest, &oci1); err != nil { - return nil, err - } - // Check manifest's and layers' media types. - if err := SupportedOCI1MediaType(oci1.Config.MediaType); err != nil { - return nil, err - } - for _, layer := range oci1.Layers { - if err := SupportedOCI1MediaType(layer.MediaType); err != nil { - return nil, err - } - } - return &oci1, nil -} - -// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. -func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { - return &OCI1{ - imgspecv1.Manifest{ - Versioned: specs.Versioned{SchemaVersion: 2}, - Config: config, - Layers: layers, - }, - } -} - -// OCI1Clone creates a copy of the supplied OCI1 manifest. -func OCI1Clone(src *OCI1) *OCI1 { - return &OCI1{ - Manifest: src.Manifest, - } -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -func (m *OCI1) ConfigInfo() types.BlobInfo { - return BlobInfoFromOCI1Descriptor(m.Config) -} - -// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *OCI1) LayerInfos() []LayerInfo { - blobs := []LayerInfo{} - for _, layer := range m.Layers { - blobs = append(blobs, LayerInfo{ - BlobInfo: BlobInfoFromOCI1Descriptor(layer), - EmptyLayer: false, - }) - } - return blobs -} - -// isOCI1NonDistributableLayer is a convenience wrapper to check if a given mime -// type is a compressed or decompressed OCI v1 non-distributable layer. -func isOCI1NonDistributableLayer(mimeType string) bool { - switch mimeType { - case imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd: - return true - default: - return false - } -} - -// isOCI1Layer is a convenience wrapper to check if a given mime type is a -// compressed or decompressed OCI v1 layer. 
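The OCI1 constructor mirrors its schema 2 sibling but takes imgspecv1 descriptors, which can also carry annotations. A sketch, with digests and sizes assumed:

    // import assumed: imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    m := manifest.OCI1FromComponents(
        imgspecv1.Descriptor{
            MediaType: imgspecv1.MediaTypeImageConfig,
            Digest:    configDigest, // assumed
            Size:      configSize,   // assumed
        },
        []imgspecv1.Descriptor{{
            MediaType: imgspecv1.MediaTypeImageLayerGzip,
            Digest:    layerDigest, // assumed
            Size:      layerSize,   // assumed
        }},
    )
    id, err := m.ImageID(nil) // for OCI this is just the validated config digest's hex; diffIDs are unused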
-func isOCI1Layer(mimeType string) bool { - switch mimeType { - case imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd: - return true - default: - return false - } -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - if len(m.Layers) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) - } - original := m.Layers - m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) - for i, info := range layerInfos { - // First make sure we support the media type of the original layer. - if err := SupportedOCI1MediaType(original[i].MediaType); err != nil { - return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType) - } - - // Set the correct media types based on the specified compression - // operation, the desired compression algorithm AND the original media - // type. - // - // Note that manifests in containers-storage might be reporting the - // wrong media type since the original manifests are stored while layers - // are decompressed in storage. Hence, we need to consider the case - // that an already {de}compressed layer should be {de}compressed, which - // is being addressed in `isSchema2{Foreign}Layer`. - switch info.CompressionOperation { - case types.PreserveOriginal: - // Keep the original media type. - m.Layers[i].MediaType = original[i].MediaType - - case types.Decompress: - // Decompress the original media type and check if it was - // non-distributable one or not. - mimeType := original[i].MediaType - switch { - case isOCI1NonDistributableLayer(mimeType): - m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable - case isOCI1Layer(mimeType): - m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer - default: - return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType) - } - - case types.Compress: - if info.CompressionAlgorithm == nil { - logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest) - m.Layers[i].MediaType = original[i].MediaType - break - } - // Compress the original media type and set the new one based on - // that type (distributable or not) and the specified compression - // algorithm. Throw an error if the algorithm is not supported. 
- mimeType := original[i].MediaType - switch info.CompressionAlgorithm.Name() { - case compression.Gzip.Name(): - switch { - case isOCI1NonDistributableLayer(mimeType): - m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip - case isOCI1Layer(mimeType): - m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerGzip - default: - return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType) - } - - case compression.Zstd.Name(): - switch { - case isOCI1NonDistributableLayer(mimeType): - m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableZstd - case isOCI1Layer(mimeType): - m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerZstd - default: - return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType) - } - - default: - return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest) - } - - default: - return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest) - } - m.Layers[i].Digest = info.Digest - m.Layers[i].Size = info.Size - m.Layers[i].Annotations = info.Annotations - m.Layers[i].URLs = info.URLs - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *OCI1) Serialize() ([]byte, error) { - return json.Marshal(*m) -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - config, err := configGetter(m.ConfigInfo()) - if err != nil { - return nil, err - } - v1 := &imgspecv1.Image{} - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - d1 := &Schema2V1Image{} - json.Unmarshal(config, d1) - i := &types.ImageInspectInfo{ - Tag: "", - Created: v1.Created, - DockerVersion: d1.DockerVersion, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - Layers: layerInfosToStrings(m.LayerInfos()), - Env: d1.Config.Env, - } - return i, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. -func (m *OCI1) ImageID([]digest.Digest) (string, error) { - if err := m.Config.Digest.Validate(); err != nil { - return "", err - } - return m.Config.Digest.Hex(), nil -} diff --git a/vendor/github.com/containers/image/v4/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v4/oci/archive/oci_dest.go deleted file mode 100644 index 2455ed575..000000000 --- a/vendor/github.com/containers/image/v4/oci/archive/oci_dest.go +++ /dev/null @@ -1,151 +0,0 @@ -package archive - -import ( - "context" - "io" - "os" - - "github.com/containers/image/v4/types" - "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" -) - -type ociArchiveImageDestination struct { - ref ociArchiveReference - unpackedDest types.ImageDestination - tempDirRef tempDirOCIRef -} - -// newImageDestination returns an ImageDestination for writing to an existing directory. 
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) { - tempDirRef, err := createOCIRef(ref.image) - if err != nil { - return nil, errors.Wrapf(err, "error creating oci reference") - } - unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys) - if err != nil { - if err := tempDirRef.deleteTempDir(); err != nil { - return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) - } - return nil, err - } - return &ociArchiveImageDestination{ref: ref, - unpackedDest: unpackedDest, - tempDirRef: tempDirRef}, nil -} - -// Reference returns the reference used to set up this destination. -func (d *ociArchiveImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any -// Close deletes the temp directory of the oci-archive image -func (d *ociArchiveImageDestination) Close() error { - defer d.tempDirRef.deleteTempDir() - return d.unpackedDest.Close() -} - -func (d *ociArchiveImageDestination) SupportedManifestMIMETypes() []string { - return d.unpackedDest.SupportedManifestMIMETypes() -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures -func (d *ociArchiveImageDestination) SupportsSignatures(ctx context.Context) error { - return d.unpackedDest.SupportsSignatures(ctx) -} - -func (d *ociArchiveImageDestination) DesiredLayerCompression() types.LayerCompression { - return d.unpackedDest.DesiredLayerCompression() -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool { - return d.unpackedDest.AcceptsForeignLayerURLs() -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise -func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool { - return d.unpackedDest.MustMatchRuntimeOS() -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool { - return d.unpackedDest.IgnoresEmbeddedDockerReference() -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// inputInfo.MediaType describes the blob format, if known. -// May update cache. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
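That contract is what lets callers verify digests while the blob streams through. A sketch of one way to do so with the go-digest API; this is an assumption about caller-side usage, not code from this file:

    // imports assumed: "io", digest "github.com/opencontainers/go-digest",
    //                  "github.com/pkg/errors"
    digester := digest.Canonical.Digester()
    tee := io.TeeReader(stream, digester.Hash()) // hash the bytes as they are uploaded
    if _, err := dest.PutBlob(ctx, tee, inputInfo, cache, false); err != nil {
        return err
    }
    if inputInfo.Digest != "" && digester.Digest() != inputInfo.Digest {
        return errors.Errorf("digest mismatch: expected %s, got %s", inputInfo.Digest, digester.Digest())
    }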
-func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig) -} - -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. -// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute) -} - -// PutManifest writes manifest to the destination -func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte) error { - return d.unpackedDest.PutManifest(ctx, m) -} - -func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { - return d.unpackedDest.PutSignatures(ctx, signatures) -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted -// after the directory is made, it is tarred up into a file and the directory is deleted -func (d *ociArchiveImageDestination) Commit(ctx context.Context) error { - if err := d.unpackedDest.Commit(ctx); err != nil { - return errors.Wrapf(err, "error storing image %q", d.ref.image) - } - - // path of directory to tar up - src := d.tempDirRef.tempDirectory - // path to save tarred up file - dst := d.ref.resolvedFile - return tarDirectory(src, dst) -} - -// tar converts the directory at src and saves it to dst -func tarDirectory(src, dst string) error { - // input is a stream of bytes from the archive of the directory at path - input, err := archive.Tar(src, archive.Uncompressed) - if err != nil { - return errors.Wrapf(err, "error retrieving stream of bytes from %q", src) - } - - // creates the tar file - outFile, err := os.Create(dst) - if err != nil { - return errors.Wrapf(err, "error creating tar file %q", dst) - } - defer outFile.Close() - - // copies the contents of the directory to the tar file - // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. 
- _, err = io.Copy(outFile, input) - - return err -} diff --git a/vendor/github.com/containers/image/v4/oci/archive/oci_src.go b/vendor/github.com/containers/image/v4/oci/archive/oci_src.go deleted file mode 100644 index 8a479883f..000000000 --- a/vendor/github.com/containers/image/v4/oci/archive/oci_src.go +++ /dev/null @@ -1,102 +0,0 @@ -package archive - -import ( - "context" - "io" - - ocilayout "github.com/containers/image/v4/oci/layout" - "github.com/containers/image/v4/types" - digest "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type ociArchiveImageSource struct { - ref ociArchiveReference - unpackedSrc types.ImageSource - tempDirRef tempDirOCIRef -} - -// newImageSource returns an ImageSource for reading from an existing directory. -// newImageSource untars the file and saves it in a temp directory -func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) { - tempDirRef, err := createUntarTempDir(ref) - if err != nil { - return nil, errors.Wrap(err, "error creating temp directory") - } - - unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys) - if err != nil { - if err := tempDirRef.deleteTempDir(); err != nil { - return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) - } - return nil, err - } - return &ociArchiveImageSource{ref: ref, - unpackedSrc: unpackedSrc, - tempDirRef: tempDirRef}, nil -} - -// LoadManifestDescriptor loads the manifest -func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { - ociArchRef, ok := imgRef.(ociArchiveReference) - if !ok { - return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference") - } - tempDirRef, err := createUntarTempDir(ociArchRef) - if err != nil { - return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory") - } - defer tempDirRef.deleteTempDir() - - descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted) - if err != nil { - return imgspecv1.Descriptor{}, errors.Wrap(err, "error loading index") - } - return descriptor, nil -} - -// Reference returns the reference used to set up this source. -func (s *ociArchiveImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -// Close deletes the temporary directory at dst -func (s *ociArchiveImageSource) Close() error { - defer s.tempDirRef.deleteTempDir() - return s.unpackedSrc.Close() -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - return s.unpackedSrc.GetManifest(ctx, instanceDigest) -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool { - return false -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - return s.unpackedSrc.GetBlob(ctx, info, cache) -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - return s.unpackedSrc.GetSignatures(ctx, instanceDigest) -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/v4/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v4/oci/archive/oci_transport.go deleted file mode 100644 index c8808ecb5..000000000 --- a/vendor/github.com/containers/image/v4/oci/archive/oci_transport.go +++ /dev/null @@ -1,192 +0,0 @@ -package archive - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "strings" - - "github.com/containers/image/v4/directory/explicitfilepath" - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/image" - "github.com/containers/image/v4/internal/tmpdir" - "github.com/containers/image/v4/oci/internal" - ocilayout "github.com/containers/image/v4/oci/layout" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" - "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for OCI archive -// it creates an oci-archive tar file by calling into the OCI transport -// tarring the directory created by oci and deleting the directory -var Transport = ociArchiveTransport{} - -type ociArchiveTransport struct{} - -// ociArchiveReference is an ImageReference for OCI Archive paths -type ociArchiveReference struct { - file string - resolvedFile string - image string -} - -func (t ociArchiveTransport) Name() string { - return "oci-archive" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix -// into an ImageReference. -func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error { - return internal.ValidateScope(scope) -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. -func ParseReference(reference string) (types.ImageReference, error) { - file, image := internal.SplitPathAndImage(reference) - return NewReference(file, image) -} - -// NewReference returns an OCI reference for a file and a image. 
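A sketch of the `file:image` form this transport parses (paths assumed for illustration); note that the temp-directory lifecycle is tied to Close:

    ref, err := archive.ParseReference("/tmp/busybox.tar:latest") // file "/tmp/busybox.tar", image "latest"
    if err != nil {
        return err
    }
    src, err := ref.NewImageSource(ctx, nil) // untars into a temp dir, then reads it as an OCI layout
    if err != nil {
        return err
    }
    defer src.Close() // also removes the temp dir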
-func NewReference(file, image string) (types.ImageReference, error) { - resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) - if err != nil { - return nil, err - } - - if err := internal.ValidateOCIPath(file); err != nil { - return nil, err - } - - if err := internal.ValidateImageName(image); err != nil { - return nil, err - } - - return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil -} - -func (ref ociArchiveReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -func (ref ociArchiveReference) StringWithinTransport() string { - return fmt.Sprintf("%s:%s", ref.file, ref.image) -} - -// DockerReference returns a Docker reference associated with this reference -func (ref ociArchiveReference) DockerReference() reference.Named { - return nil -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -func (ref ociArchiveReference) PolicyConfigurationIdentity() string { - // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the - // same image and the two can’t be statically disambiguated. Using at least the repository directory is - // less granular but hopefully still useful. - return fmt.Sprintf("%s", ref.resolvedFile) -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set -func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string { - res := []string{} - path := ref.resolvedFile - for { - lastSlash := strings.LastIndex(path, "/") - // Note that we do not include "/"; it is redundant with the default "" global default, - // and rejected by ociTransport.ValidatePolicyConfigurationScope above. - if lastSlash == -1 || path == "/" { - break - } - res = append(res, path) - path = path[:lastSlash] - } - return res -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref ociArchiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, sys, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. 
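For a concrete feel of the namespace walk above (path assumed for illustration):

    // resolvedFile = "/var/lib/images/img.tar" yields, in order:
    //   /var/lib/images/img.tar
    //   /var/lib/images
    //   /var/lib
    //   /var
    // "/" itself is never emitted, so it cannot shadow the global default scope "".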
-func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
-	return newImageDestination(ctx, sys, ref)
-}
-
-// DeleteImage deletes the named image from the registry, if supported.
-func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
-	return errors.Errorf("Deleting images not implemented for oci: images")
-}
-
-// tempDirOCIRef stores the ociReference and temporary directory returned by createOCIRef
-type tempDirOCIRef struct {
-	tempDirectory   string
-	ociRefExtracted types.ImageReference
-}
-
-// deleteTempDir deletes the temporary directory created
-func (t *tempDirOCIRef) deleteTempDir() error {
-	return os.RemoveAll(t.tempDirectory)
-}
-
-// createOCIRef creates the oci reference of the image
-func createOCIRef(image string) (tempDirOCIRef, error) {
-	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci")
-	if err != nil {
-		return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
-	}
-	ociRef, err := ocilayout.NewReference(dir, image)
-	if err != nil {
-		return tempDirOCIRef{}, err
-	}
-
-	tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef}
-	return tempDirRef, nil
-}
-
-// createUntarTempDir creates the temporary directory and copies the tarred content to it
-func createUntarTempDir(ref ociArchiveReference) (tempDirOCIRef, error) {
-	tempDirRef, err := createOCIRef(ref.image)
-	if err != nil {
-		return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference")
-	}
-	src := ref.resolvedFile
-	dst := tempDirRef.tempDirectory
-	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
-	if err := archive.UntarPath(src, dst); err != nil {
-		if err := tempDirRef.deleteTempDir(); err != nil {
-			return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
-		}
-		return tempDirOCIRef{}, errors.Wrapf(err, "error untarring file %q", tempDirRef.tempDirectory)
-	}
-	return tempDirRef, nil
-}
diff --git a/vendor/github.com/containers/image/v4/oci/internal/oci_util.go b/vendor/github.com/containers/image/v4/oci/internal/oci_util.go
deleted file mode 100644
index c2012e50e..000000000
--- a/vendor/github.com/containers/image/v4/oci/internal/oci_util.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package internal
-
-import (
-	"github.com/pkg/errors"
-	"path/filepath"
-	"regexp"
-	"runtime"
-	"strings"
-)
-
-// annotation specs from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
-const (
-	separator = `(?:[-._:@+]|--)`
-	alphanum  = `(?:[A-Za-z0-9]+)`
-	component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
-)
-
-var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
-var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
-
-// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
-// In any other case an error is returned.
-func ValidateImageName(image string) error {
-	if len(image) == 0 {
-		return nil
-	}
-
-	var err error
-	if !refRegexp.MatchString(image) {
-		err = errors.Errorf("Invalid image %s", image)
-	}
-	return err
-}
-
-// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
-// Neither path nor image parts are validated at this stage.
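Before the implementation, a self-contained sketch of the non-Windows split rule it applies (everything before the first ':' is the path; the remainder, if any, is the image name):

```go
// Standalone illustration of the non-Windows splitting rule used below.
package main

import (
	"fmt"
	"strings"
)

func split(reference string) (path, image string) {
	sep := strings.SplitN(reference, ":", 2)
	path = sep[0]
	if len(sep) == 2 {
		image = sep[1]
	}
	return path, image
}

func main() {
	fmt.Println(split("/var/lib/oci:fedora")) // "/var/lib/oci", "fedora"
	fmt.Println(split("/var/lib/oci"))        // "/var/lib/oci", ""
}
```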
-func SplitPathAndImage(reference string) (string, string) { - if runtime.GOOS == "windows" { - return splitPathAndImageWindows(reference) - } - return splitPathAndImageNonWindows(reference) -} - -func splitPathAndImageWindows(reference string) (string, string) { - groups := windowsRefRegexp.FindStringSubmatch(reference) - // nil group means no match - if groups == nil { - return reference, "" - } - - // we expect three elements. First one full match, second the capture group for the path and - // the third the capture group for the image - if len(groups) != 3 { - return reference, "" - } - return groups[1], groups[2] -} - -func splitPathAndImageNonWindows(reference string) (string, string) { - sep := strings.SplitN(reference, ":", 2) - path := sep[0] - - var image string - if len(sep) == 2 { - image = sep[1] - } - return path, image -} - -// ValidateOCIPath takes the OCI path and validates it. -func ValidateOCIPath(path string) error { - if runtime.GOOS == "windows" { - // On Windows we must allow for a ':' as part of the path - if strings.Count(path, ":") > 1 { - return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path) - } - } else { - if strings.Contains(path, ":") { - return errors.Errorf("Invalid OCI reference: path %s contains a colon", path) - } - } - return nil -} - -// ValidateScope validates a policy configuration scope for an OCI transport. -func ValidateScope(scope string) error { - var err error - if runtime.GOOS == "windows" { - err = validateScopeWindows(scope) - } else { - err = validateScopeNonWindows(scope) - } - if err != nil { - return err - } - - cleaned := filepath.Clean(scope) - if cleaned != scope { - return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) - } - - return nil -} - -func validateScopeWindows(scope string) error { - matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) - if !matched { - return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope) - } - - return nil -} - -func validateScopeNonWindows(scope string) error { - if !strings.HasPrefix(scope, "/") { - return errors.Errorf("Invalid scope %s: must be an absolute path", scope) - } - - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - - return nil -} diff --git a/vendor/github.com/containers/image/v4/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v4/oci/layout/oci_dest.go deleted file mode 100644 index 20925d3dc..000000000 --- a/vendor/github.com/containers/image/v4/oci/layout/oci_dest.go +++ /dev/null @@ -1,306 +0,0 @@ -package layout - -import ( - "context" - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/types" - digest "github.com/opencontainers/go-digest" - imgspec "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type ociImageDestination struct { - ref ociReference - index imgspecv1.Index - sharedBlobDir string - acceptUncompressedLayers bool -} - -// newImageDestination returns an ImageDestination for writing to an existing directory. 
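From a caller's perspective, the destination below is reached through the exported layout API; a hypothetical sketch (the directory name is made up, and NewReference requires it to exist):

```go
// Hypothetical sketch: open an OCI layout directory for writing.
package main

import (
	"context"

	ocilayout "github.com/containers/image/v4/oci/layout"
)

func main() {
	ref, err := ocilayout.NewReference("/tmp/layout", "myimage")
	if err != nil {
		panic(err)
	}
	dest, err := ref.NewImageDestination(context.Background(), nil)
	if err != nil {
		panic(err)
	}
	defer dest.Close()
	// PutBlob / PutManifest / Commit would follow here.
}
```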
-func newImageDestination(sys *types.SystemContext, ref ociReference) (types.ImageDestination, error) { - var index *imgspecv1.Index - if indexExists(ref) { - var err error - index, err = ref.getIndex() - if err != nil { - return nil, err - } - } else { - index = &imgspecv1.Index{ - Versioned: imgspec.Versioned{ - SchemaVersion: 2, - }, - } - } - - d := &ociImageDestination{ref: ref, index: *index} - if sys != nil { - d.sharedBlobDir = sys.OCISharedBlobDirPath - d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers - } - - if err := ensureDirectoryExists(d.ref.dir); err != nil { - return nil, err - } - // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, - // but it MAY be empty (e.g. if we never end up calling PutBlob) - // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 - if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { - return nil, err - } - return d, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *ociImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *ociImageDestination) Close() error { - return nil -} - -func (d *ociImageDestination) SupportedManifestMIMETypes() []string { - return []string{ - imgspecv1.MediaTypeImageManifest, - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error { - return errors.Errorf("Pushing signatures for OCI images is not supported") -} - -func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression { - if d.acceptUncompressedLayers { - return types.PreserveOriginal - } - return types.Compress -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { - return true -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *ociImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { - return false // N/A, DockerReference() returns nil. -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *ociImageDestination) HasThreadSafePutBlob() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// inputInfo.MediaType describes the blob format, if known. -// May update cache. 
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
-	blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
-	if err != nil {
-		return types.BlobInfo{}, err
-	}
-	succeeded := false
-	explicitClosed := false
-	defer func() {
-		if !explicitClosed {
-			blobFile.Close()
-		}
-		if !succeeded {
-			os.Remove(blobFile.Name())
-		}
-	}()
-
-	digester := digest.Canonical.Digester()
-	tee := io.TeeReader(stream, digester.Hash())
-
-	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
-	size, err := io.Copy(blobFile, tee)
-	if err != nil {
-		return types.BlobInfo{}, err
-	}
-	computedDigest := digester.Digest()
-	if inputInfo.Size != -1 && size != inputInfo.Size {
-		return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
-	}
-	if err := blobFile.Sync(); err != nil {
-		return types.BlobInfo{}, err
-	}
-
-	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
-	// On Windows, the “permissions of newly created files” argument to syscall.Open is
-	// ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
-	// always fails on Windows.
-	if runtime.GOOS != "windows" {
-		if err := blobFile.Chmod(0644); err != nil {
-			return types.BlobInfo{}, err
-		}
-	}
-
-	blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir)
-	if err != nil {
-		return types.BlobInfo{}, err
-	}
-	if err := ensureParentDirectoryExists(blobPath); err != nil {
-		return types.BlobInfo{}, err
-	}
-
-	// need to explicitly close the file, since a rename won't otherwise work on Windows
-	blobFile.Close()
-	explicitClosed = true
-	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-		return types.BlobInfo{}, err
-	}
-	succeeded = true
-	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
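TryReusingBlob follows below. First, a standalone sketch of the verify-while-copying pattern PutBlob uses above, in which every byte written to the temporary file also feeds a digester:

```go
// Standalone sketch of PutBlob's digest-while-copying pattern.
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	digester := digest.Canonical.Digester()
	// Everything read from tee is also written into the digester's hash.
	tee := io.TeeReader(strings.NewReader("blob contents"), digester.Hash())

	size, err := io.Copy(ioutil.Discard, tee) // stand-in for the temp blob file
	if err != nil {
		panic(err)
	}
	fmt.Printf("copied %d bytes, digest %s\n", size, digester.Digest())
}
```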
-func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
-	if info.Digest == "" {
-		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
-	}
-	blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
-	if err != nil {
-		return false, types.BlobInfo{}, err
-	}
-	finfo, err := os.Stat(blobPath)
-	if err != nil && os.IsNotExist(err) {
-		return false, types.BlobInfo{}, nil
-	}
-	if err != nil {
-		return false, types.BlobInfo{}, err
-	}
-	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
-}
-
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema),
-// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte) error {
-	digest, err := manifest.Digest(m)
-	if err != nil {
-		return err
-	}
-	desc := imgspecv1.Descriptor{}
-	desc.Digest = digest
-	// TODO(runcom): be aware and add support for OCI manifest lists
-	desc.MediaType = imgspecv1.MediaTypeImageManifest
-	desc.Size = int64(len(m))
-
-	blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir)
-	if err != nil {
-		return err
-	}
-	if err := ensureParentDirectoryExists(blobPath); err != nil {
-		return err
-	}
-	if err := ioutil.WriteFile(blobPath, m, 0644); err != nil {
-		return err
-	}
-
-	if d.ref.image != "" {
-		annotations := make(map[string]string)
-		annotations["org.opencontainers.image.ref.name"] = d.ref.image
-		desc.Annotations = annotations
-	}
-	desc.Platform = &imgspecv1.Platform{
-		Architecture: runtime.GOARCH,
-		OS:           runtime.GOOS,
-	}
-	d.addManifest(&desc)
-
-	return nil
-}
-
-func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
-	for i, manifest := range d.index.Manifests {
-		if manifest.Annotations["org.opencontainers.image.ref.name"] == desc.Annotations["org.opencontainers.image.ref.name"] {
-			// TODO Should there first be a cleanup based on the descriptor we are going to replace?
-			d.index.Manifests[i] = *desc
-			return
-		}
-	}
-	d.index.Manifests = append(d.index.Manifests, *desc)
-}
-
-func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
-	if len(signatures) != 0 {
-		return errors.Errorf("Pushing signatures for OCI images is not supported")
-	}
-	return nil
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *ociImageDestination) Commit(ctx context.Context) error {
-	if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
-		return err
-	}
-	indexJSON, err := json.Marshal(d.index)
-	if err != nil {
-		return err
-	}
-	return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
-}
-
-func ensureDirectoryExists(path string) error {
-	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
-		if err := os.MkdirAll(path, 0755); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// ensureParentDirectoryExists ensures the parent of the supplied path exists.
-func ensureParentDirectoryExists(path string) error {
-	return ensureDirectoryExists(filepath.Dir(path))
-}
-
-// indexExists checks whether the index location specified in the OCI reference exists.
-// The implementation is opinionated, since in case of unexpected errors true is returned.
-func indexExists(ref ociReference) bool {
-	_, err := os.Stat(ref.indexPath())
-	if err == nil {
-		return true
-	}
-	if os.IsNotExist(err) {
-		return false
-	}
-	return true
-}
diff --git a/vendor/github.com/containers/image/v4/oci/layout/oci_src.go b/vendor/github.com/containers/image/v4/oci/layout/oci_src.go
deleted file mode 100644
index dd6c6c4a6..000000000
--- a/vendor/github.com/containers/image/v4/oci/layout/oci_src.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package layout
-
-import (
-	"context"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"os"
-	"strconv"
-
-	"github.com/containers/image/v4/pkg/tlsclientconfig"
-	"github.com/containers/image/v4/types"
-	"github.com/docker/go-connections/tlsconfig"
-	"github.com/opencontainers/go-digest"
-	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-)
-
-type ociImageSource struct {
-	ref           ociReference
-	descriptor    imgspecv1.Descriptor
-	client        *http.Client
-	sharedBlobDir string
-}
-
-// newImageSource returns an ImageSource for reading from an existing directory.
-func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSource, error) {
-	tr := tlsclientconfig.NewTransport()
-	tr.TLSClientConfig = tlsconfig.ServerDefault()
-
-	if sys != nil && sys.OCICertPath != "" {
-		if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil {
-			return nil, err
-		}
-		tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify
-	}
-
-	client := &http.Client{}
-	client.Transport = tr
-	descriptor, err := ref.getManifestDescriptor()
-	if err != nil {
-		return nil, err
-	}
-	d := &ociImageSource{ref: ref, descriptor: descriptor, client: client}
-	if sys != nil {
-		// TODO(jonboulle): check dir existence?
-		d.sharedBlobDir = sys.OCISharedBlobDirPath
-	}
-	return d, nil
-}
-
-// Reference returns the reference used to set up this source.
-func (s *ociImageSource) Reference() types.ImageReference {
-	return s.ref
-}
-
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *ociImageSource) Close() error {
-	return nil
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
-// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
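Before the source-side GetManifest below, this is roughly what the destination's Commit above leaves on disk, as a standalone sketch (hypothetical directory; the imgspec packages are the ones imported by the deleted file):

```go
// Sketch of Commit's on-disk result for an OCI layout directory.
package main

import (
	"encoding/json"
	"io/ioutil"
	"path/filepath"

	imgspec "github.com/opencontainers/image-spec/specs-go"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	dir := "/tmp/layout" // hypothetical; must already exist
	index := imgspecv1.Index{Versioned: imgspec.Versioned{SchemaVersion: 2}}
	indexJSON, err := json.Marshal(index)
	if err != nil {
		panic(err)
	}
	// oci-layout carries the layout version; index.json carries the manifests.
	if err := ioutil.WriteFile(filepath.Join(dir, "oci-layout"), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
		panic(err)
	}
	if err := ioutil.WriteFile(filepath.Join(dir, "index.json"), indexJSON, 0644); err != nil {
		panic(err)
	}
}
```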
-func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
-	var dig digest.Digest
-	var mimeType string
-	if instanceDigest == nil {
-		dig = digest.Digest(s.descriptor.Digest)
-		mimeType = s.descriptor.MediaType
-	} else {
-		dig = *instanceDigest
-		// XXX: instanceDigest means that we don't immediately have the context of what
-		// mediaType the manifest has. In OCI this means that we don't know
-		// what reference it came from, so we just *assume* that it's
-		// MediaTypeImageManifest.
-		// FIXME: We should actually be able to look up the manifest in the index,
-		// and see the MIME type there.
-		mimeType = imgspecv1.MediaTypeImageManifest
-	}
-
-	manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir)
-	if err != nil {
-		return nil, "", err
-	}
-	m, err := ioutil.ReadFile(manifestPath)
-	if err != nil {
-		return nil, "", err
-	}
-
-	return m, mimeType, nil
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *ociImageSource) HasThreadSafeGetBlob() bool {
-	return false
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
-	if len(info.URLs) != 0 {
-		return s.getExternalBlob(ctx, info.URLs)
-	}
-
-	path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir)
-	if err != nil {
-		return nil, 0, err
-	}
-
-	r, err := os.Open(path)
-	if err != nil {
-		return nil, 0, err
-	}
-	fi, err := r.Stat()
-	if err != nil {
-		return nil, 0, err
-	}
-	return r, fi.Size(), nil
-}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
-	return [][]byte{}, nil
-}
-
-func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
-	errWrap := errors.New("failed fetching external blob from all urls")
-	for _, url := range urls {
-
-		req, err := http.NewRequest("GET", url, nil)
-		if err != nil {
-			errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
-			continue
-		}
-
-		resp, err := s.client.Do(req.WithContext(ctx))
-		if err != nil {
-			errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
-			continue
-		}
-
-		if resp.StatusCode != http.StatusOK {
-			resp.Body.Close()
-			errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url)
-			continue
-		}
-
-		return resp.Body, getBlobSize(resp), nil
-	}
-
-	return nil, 0, errWrap
-}
-
-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *ociImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} - -func getBlobSize(resp *http.Response) int64 { - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 - } - return size -} diff --git a/vendor/github.com/containers/image/v4/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v4/oci/layout/oci_transport.go deleted file mode 100644 index 259852b4d..000000000 --- a/vendor/github.com/containers/image/v4/oci/layout/oci_transport.go +++ /dev/null @@ -1,264 +0,0 @@ -package layout - -import ( - "context" - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/containers/image/v4/directory/explicitfilepath" - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/image" - "github.com/containers/image/v4/oci/internal" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -var ( - // Transport is an ImageTransport for OCI directories. - Transport = ociTransport{} - - // ErrMoreThanOneImage is an error returned when the manifest includes - // more than one image and the user should choose which one to use. - ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image") -) - -type ociTransport struct{} - -func (t ociTransport) Name() string { - return "oci" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { - return internal.ValidateScope(scope) -} - -// ociReference is an ImageReference for OCI directory paths. -type ociReference struct { - // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! - // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on. - - // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid - // being exposed to symlinks and renames in the parent directories to the working directory). - // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) - dir string // As specified by the user. May be relative, contain symlinks, etc. - resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. 
-	// If image=="", it means the "only image" in the index.json is used in the case it is a source;
-	// for destinations, the image name annotation "image.ref.name" is not added to the index.json
-	image string
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
-func ParseReference(reference string) (types.ImageReference, error) {
-	dir, image := internal.SplitPathAndImage(reference)
-	return NewReference(dir, image)
-}
-
-// NewReference returns an OCI reference for a directory and an image.
-//
-// We do not expose an API supplying the resolvedDir; we could, but recomputing it
-// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
-func NewReference(dir, image string) (types.ImageReference, error) {
-	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := internal.ValidateOCIPath(dir); err != nil {
-		return nil, err
-	}
-
-	if err = internal.ValidateImageName(image); err != nil {
-		return nil, err
-	}
-
-	return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
-}
-
-func (ref ociReference) Transport() types.ImageTransport {
-	return Transport
-}
-
-// StringWithinTransport returns a string representation of the reference, which MUST be such that
-// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
-// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
-// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
-func (ref ociReference) StringWithinTransport() string {
-	return fmt.Sprintf("%s:%s", ref.dir, ref.image)
-}
-
-// DockerReference returns a Docker reference associated with this reference
-// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
-// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
-func (ref ociReference) DockerReference() reference.Named {
-	return nil
-}
-
-// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
-// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
-// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
-// (i.e. various references with exactly the same semantics should return the same configuration identity)
-// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
-// not required/guaranteed that it will be a valid input to Transport().ParseReference().
-// Returns "" if configuration identities for these references are not supported.
-func (ref ociReference) PolicyConfigurationIdentity() string {
-	// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
-	// same image and the two can’t be statically disambiguated. Using at least the repository directory is
-	// less granular but hopefully still useful.
-	return fmt.Sprintf("%s", ref.resolvedDir)
-}
-
-// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
-// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
-// in order, terminating on first match, and an implicit "" is always checked at the end.
-// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
-// and each following element to be a prefix of the element preceding it.
-func (ref ociReference) PolicyConfigurationNamespaces() []string {
-	res := []string{}
-	path := ref.resolvedDir
-	for {
-		lastSlash := strings.LastIndex(path, "/")
-		// Note that we do not include "/"; it is redundant with the default "" global default,
-		// and rejected by ociTransport.ValidatePolicyConfigurationScope above.
-		if lastSlash == -1 || path == "/" {
-			break
-		}
-		res = append(res, path)
-		path = path[:lastSlash]
-	}
-	return res
-}
-
-// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned ImageCloser.
-// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
-// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
-func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
-	src, err := newImageSource(sys, ref)
-	if err != nil {
-		return nil, err
-	}
-	return image.FromSource(ctx, sys, src)
-}
-
-// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs
-// opening the index, nil is returned together with an error.
-func (ref ociReference) getIndex() (*imgspecv1.Index, error) {
-	indexJSON, err := os.Open(ref.indexPath())
-	if err != nil {
-		return nil, err
-	}
-	defer indexJSON.Close()
-
-	index := &imgspecv1.Index{}
-	if err := json.NewDecoder(indexJSON).Decode(index); err != nil {
-		return nil, err
-	}
-	return index, nil
-}
-
-func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
-	index, err := ref.getIndex()
-	if err != nil {
-		return imgspecv1.Descriptor{}, err
-	}
-
-	var d *imgspecv1.Descriptor
-	if ref.image == "" {
-		// return manifest if only one image is in the oci directory
-		if len(index.Manifests) == 1 {
-			d = &index.Manifests[0]
-		} else {
-			// ask user to choose image when more than one image in the oci directory
-			return imgspecv1.Descriptor{}, ErrMoreThanOneImage
-		}
-	} else {
-		// if image specified, look through all manifests for a match
-		for _, md := range index.Manifests {
-			if md.MediaType != imgspecv1.MediaTypeImageManifest {
-				continue
-			}
-			refName, ok := md.Annotations["org.opencontainers.image.ref.name"]
-			if !ok {
-				continue
-			}
-			if refName == ref.image {
-				d = &md
-				break
-			}
-		}
-	}
-	if d == nil {
-		return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image)
-	}
-	return *d, nil
-}
-
-// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
-// when pulling an image
-func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
-	ociRef, ok := imgRef.(ociReference)
-	if !ok {
-		return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef")
-	}
-	return ociRef.getManifestDescriptor()
-}
-
-// NewImageSource returns a types.ImageSource for this reference.
-// The caller must call .Close() on the returned ImageSource.
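Both PolicyConfigurationNamespaces implementations in this patch share the same prefix walk; a standalone sketch of its behavior:

```go
// Standalone sketch of the policy-namespace prefix walk.
package main

import (
	"fmt"
	"strings"
)

func namespaces(path string) []string {
	res := []string{}
	for {
		lastSlash := strings.LastIndex(path, "/")
		// "/" itself is excluded; it would shadow the "" global default.
		if lastSlash == -1 || path == "/" {
			break
		}
		res = append(res, path)
		path = path[:lastSlash]
	}
	return res
}

func main() {
	fmt.Println(namespaces("/var/lib/oci/layout"))
	// [/var/lib/oci/layout /var/lib/oci /var/lib /var]
}
```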
-func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(sys, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(sys, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for oci: images") -} - -// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. -func (ref ociReference) ociLayoutPath() string { - return filepath.Join(ref.dir, "oci-layout") -} - -// indexPath returns a path for the index.json within a directory using OCI conventions. -func (ref ociReference) indexPath() string { - return filepath.Join(ref.dir, "index.json") -} - -// blobPath returns a path for a blob within a directory using OCI image-layout conventions. -func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) { - if err := digest.Validate(); err != nil { - return "", errors.Wrapf(err, "unexpected digest reference %s", digest) - } - blobDir := filepath.Join(ref.dir, "blobs") - if sharedBlobDir != "" { - blobDir = sharedBlobDir - } - return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil -} diff --git a/vendor/github.com/containers/image/v4/openshift/openshift-copies.go b/vendor/github.com/containers/image/v4/openshift/openshift-copies.go deleted file mode 100644 index f45dc24c4..000000000 --- a/vendor/github.com/containers/image/v4/openshift/openshift-copies.go +++ /dev/null @@ -1,1170 +0,0 @@ -package openshift - -import ( - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "reflect" - "strings" - "time" - - "github.com/ghodss/yaml" - "github.com/imdario/mergo" - "github.com/pkg/errors" - "golang.org/x/net/http2" - "k8s.io/client-go/util/homedir" -) - -// restTLSClientConfig is a modified copy of k8s.io/kubernets/pkg/client/restclient.TLSClientConfig. -// restTLSClientConfig contains settings to enable transport layer security -type restTLSClientConfig struct { - // Server requires TLS client certificate authentication - CertFile string - // Server requires TLS client certificate authentication - KeyFile string - // Trusted root certificates for server - CAFile string - - // CertData holds PEM-encoded bytes (typically read from a client certificate file). - // CertData takes precedence over CertFile - CertData []byte - // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). - // KeyData takes precedence over KeyFile - KeyData []byte - // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). - // CAData takes precedence over CAFile - CAData []byte -} - -// restConfig is a modified copy of k8s.io/kubernets/pkg/client/restclient.Config. -// Config holds the common attributes that can be passed to a Kubernetes client on -// initialization. -type restConfig struct { - // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. 
- // If a URL is given then the (optional) Path of that URL represents a prefix that must - // be appended to all request URIs used to access the apiserver. This allows a frontend - // proxy to easily relocate all of the apiserver endpoints. - Host string - - // Server requires Basic authentication - Username string - Password string - - // Server requires Bearer authentication. This client will not attempt to use - // refresh tokens for an OAuth2 flow. - // TODO: demonstrate an OAuth2 compatible client. - BearerToken string - - // TLSClientConfig contains settings to enable transport layer security - restTLSClientConfig - - // Server should be accessed without verifying the TLS - // certificate. For testing only. - Insecure bool -} - -// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig. -// ClientConfig is used to make it easy to get an api server client -type clientConfig interface { - // ClientConfig returns a complete client config - ClientConfig() (*restConfig, error) -} - -// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig. -func defaultClientConfig() clientConfig { - loadingRules := newOpenShiftClientConfigLoadingRules() - // REMOVED: Allowing command-line overriding of loadingRules - // REMOVED: clientcmd.ConfigOverrides - - clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules) - - return clientConfig -} - -var recommendedHomeFile = path.Join(homedir.HomeDir(), ".kube/config") - -// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules. -// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift. -// 1. --config value -// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file -func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules { - chain := []string{} - - envVarFile := os.Getenv("KUBECONFIG") - if len(envVarFile) != 0 { - chain = append(chain, filepath.SplitList(envVarFile)...) - } else { - chain = append(chain, recommendedHomeFile) - } - - return &clientConfigLoadingRules{ - Precedence: chain, - // REMOVED: Migration support; run (oc login) to trigger migration - } -} - -// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig. -// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules -// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that -// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before -// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid -// passing extraneous information down a call stack -type deferredLoadingClientConfig struct { - loadingRules *clientConfigLoadingRules - - clientConfig clientConfig -} - -// NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig. 
-// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name -func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig { - return &deferredLoadingClientConfig{loadingRules: loadingRules} -} - -func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) { - if config.clientConfig == nil { - // REMOVED: Support for concurrent use in multiple threads. - mergedConfig, err := config.loadingRules.Load() - if err != nil { - return nil, err - } - - var mergedClientConfig clientConfig - // REMOVED: Interactive fallback support. - mergedClientConfig = newNonInteractiveClientConfig(*mergedConfig) - - config.clientConfig = mergedClientConfig - } - - return config.clientConfig, nil -} - -// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig. -// ClientConfig implements ClientConfig -func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) { - mergedClientConfig, err := config.createClientConfig() - if err != nil { - return nil, err - } - mergedConfig, err := mergedClientConfig.ClientConfig() - if err != nil { - return nil, err - } - // REMOVED: In-cluster service account configuration use. - - return mergedConfig, nil -} - -var ( - // DefaultCluster is the cluster config used when no other config is specified - // TODO: eventually apiserver should start on 443 and be secure by default - defaultCluster = clientcmdCluster{Server: "http://localhost:8080"} - - // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name - envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")} -) - -// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig. -// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information -type directClientConfig struct { - config clientcmdConfig -} - -// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig. -// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information -func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig { - return &directClientConfig{config} -} - -// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig. -// ClientConfig implements ClientConfig -func (config *directClientConfig) ClientConfig() (*restConfig, error) { - if err := config.ConfirmUsable(); err != nil { - return nil, err - } - - configAuthInfo := config.getAuthInfo() - configClusterInfo := config.getCluster() - - clientConfig := &restConfig{} - clientConfig.Host = configClusterInfo.Server - if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 { - u.RawQuery = "" - u.Fragment = "" - clientConfig.Host = u.String() - } - - // only try to read the auth information if we are secure - if isConfigTransportTLS(*clientConfig) { - var err error - // REMOVED: Support for interactive fallback. 
-		userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
-		if err != nil {
-			return nil, err
-		}
-		mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig)
-
-		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
-		if err != nil {
-			return nil, err
-		}
-		mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig)
-	}
-
-	return clientConfig, nil
-}
-
-// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig.
-// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
-// both, so we have to split the objects and merge them separately.
-// We want this order of precedence for the server identification:
-// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
-// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. load the ~/.kubernetes_auth file as a default
-func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
-	mergedConfig := &restConfig{}
-
-	// configClusterInfo holds the information that identifies the server, provided by .kubeconfig
-	configClientConfig := &restConfig{}
-	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
-	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
-	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
-	mergo.MergeWithOverwrite(mergedConfig, configClientConfig)
-
-	return mergedConfig, nil
-}
-
-// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
-// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
-// both, so we have to split the objects and merge them separately.
-// We want this order of precedence for user identification:
-// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
-// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
-// 4. if there is still not enough information to identify the user, prompt if possible
-func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
-	mergedConfig := &restConfig{}
-
-	// blindly overwrite existing values based on precedence
-	if len(configAuthInfo.Token) > 0 {
-		mergedConfig.BearerToken = configAuthInfo.Token
-	}
-	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
-		mergedConfig.CertFile = configAuthInfo.ClientCertificate
-		mergedConfig.CertData = configAuthInfo.ClientCertificateData
-		mergedConfig.KeyFile = configAuthInfo.ClientKey
-		mergedConfig.KeyData = configAuthInfo.ClientKeyData
-	}
-	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
-		mergedConfig.Username = configAuthInfo.Username
-		mergedConfig.Password = configAuthInfo.Password
-	}
-
-	// REMOVED: prompting for missing information.
-	return mergedConfig, nil
-}
-
-// canIdentifyUser is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.canIdentifyUser
-func canIdentifyUser(config restConfig) bool {
-	return len(config.Username) > 0 ||
-		(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
-		len(config.BearerToken) > 0
-
-}
-
-// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
-// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
-// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
-func (config *directClientConfig) ConfirmUsable() error {
-	var validationErrors []error
-	validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
-	validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
-	// when direct client config is specified, and our only error is that no server is defined, we should
-	// return a standard "no config" error
-	if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster {
-		return newErrConfigurationInvalid([]error{errEmptyConfig})
-	}
-	return newErrConfigurationInvalid(validationErrors)
-}
-
-// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName.
-func (config *directClientConfig) getContextName() string {
-	// REMOVED: overrides support
-	return config.config.CurrentContext
-}
-
-// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName.
-func (config *directClientConfig) getAuthInfoName() string {
-	// REMOVED: overrides support
-	return config.getContext().AuthInfo
-}
-
-// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName.
-func (config *directClientConfig) getClusterName() string {
-	// REMOVED: overrides support
-	return config.getContext().Cluster
-}
-
-// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext.
-func (config *directClientConfig) getContext() clientcmdContext {
-	contexts := config.config.Contexts
-	contextName := config.getContextName()
-
-	var mergedContext clientcmdContext
-	if configContext, exists := contexts[contextName]; exists {
-		mergo.MergeWithOverwrite(&mergedContext, configContext)
-	}
-	// REMOVED: overrides support
-
-	return mergedContext
-}
-
-var (
-	errEmptyConfig = errors.New("no configuration has been provided")
-	// message is for consistency with old behavior
-	errEmptyCluster = errors.New("cluster has no server defined")
-)
-
-// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo.
-// validateClusterInfo looks for conflicts and errors in the cluster info -func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error { - var validationErrors []error - - if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) { - return []error{errEmptyCluster} - } - - if len(clusterInfo.Server) == 0 { - if len(clusterName) == 0 { - validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined")) - } else { - validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName)) - } - } - // Make sure CA data and CA file aren't both specified - if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { - validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName)) - } - if len(clusterInfo.CertificateAuthority) != 0 { - clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) - defer clientCertCA.Close() - if err != nil { - validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) - } - } - - return validationErrors -} - -// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo. -// validateAuthInfo looks for conflicts and errors in the auth info -func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error { - var validationErrors []error - - usingAuthPath := false - methods := make([]string, 0, 3) - if len(authInfo.Token) != 0 { - methods = append(methods, "token") - } - if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { - methods = append(methods, "basicAuth") - } - - if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { - // Make sure cert data and file aren't both specified - if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { - validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. 
client-cert-data will override", authInfoName))
-		}
-		// Make sure key data and file aren't both specified
-		if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
-			validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
-		}
-		// Make sure a key is specified
-		if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
-			validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
-		}
-
-		if len(authInfo.ClientCertificate) != 0 {
-			clientCertFile, err := os.Open(authInfo.ClientCertificate)
-			defer clientCertFile.Close()
-			if err != nil {
-				validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
-			}
-		}
-		if len(authInfo.ClientKey) != 0 {
-			clientKeyFile, err := os.Open(authInfo.ClientKey)
-			defer clientKeyFile.Close()
-			if err != nil {
-				validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
-			}
-		}
-	}
-
-	// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
-	if (len(methods) > 1) && (!usingAuthPath) {
-		validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
-	}
-
-	return validationErrors
-}
-
-// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo.
-func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo {
-	authInfos := config.config.AuthInfos
-	authInfoName := config.getAuthInfoName()
-
-	var mergedAuthInfo clientcmdAuthInfo
-	if configAuthInfo, exists := authInfos[authInfoName]; exists {
-		mergo.MergeWithOverwrite(&mergedAuthInfo, configAuthInfo)
-	}
-	// REMOVED: overrides support
-
-	return mergedAuthInfo
-}
-
-// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster.
-func (config *directClientConfig) getCluster() clientcmdCluster {
-	clusterInfos := config.config.Clusters
-	clusterInfoName := config.getClusterName()
-
-	var mergedClusterInfo clientcmdCluster
-	mergo.MergeWithOverwrite(&mergedClusterInfo, defaultCluster)
-	mergo.MergeWithOverwrite(&mergedClusterInfo, envVarCluster)
-	if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
-		mergo.MergeWithOverwrite(&mergedClusterInfo, configClusterInfo)
-	}
-	// REMOVED: overrides support
-
-	return mergedClusterInfo
-}
-
-// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
-// This helper implements the error and Errors interfaces. Keeping it private
-// prevents people from making an aggregate of 0 errors, which is not
-// an error, but does satisfy the error interface.
-type aggregateErr []error
-
-// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
-// NewAggregate converts a slice of errors into an Aggregate interface, which
-// is itself an implementation of the error interface. If the slice is empty,
-// this returns nil.
-// It checks whether any element of the input error list is nil, to avoid
-// a nil pointer panic when Error() is called.
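The implementation follows. Were it exercised from a test inside this package, the aggregate would behave like this (a sketch; errors.New here is github.com/pkg/errors, as imported by the deleted file):

```go
// Behavior sketch for newAggregate / aggregateErr.Error, written as an
// in-package test (hypothetical; newAggregate is unexported).
func TestAggregateSketch(t *testing.T) {
	errA, errB := errors.New("A"), errors.New("B")
	if newAggregate(nil) != nil || newAggregate([]error{nil, nil}) != nil {
		t.Fatal("nil and all-nil lists must aggregate to nil")
	}
	if got := newAggregate([]error{errA}).Error(); got != "A" {
		t.Fatalf("single error: got %q", got)
	}
	if got := newAggregate([]error{errA, errB}).Error(); got != "[A, B]" {
		t.Fatalf("two errors: got %q", got)
	}
}
```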
-func newAggregate(errlist []error) error {
-	if len(errlist) == 0 {
-		return nil
-	}
-	// In case the input error list contains nil elements
-	var errs []error
-	for _, e := range errlist {
-		if e != nil {
-			errs = append(errs, e)
-		}
-	}
-	if len(errs) == 0 {
-		return nil
-	}
-	return aggregateErr(errs)
-}
-
-// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
-// Error is part of the error interface.
-func (agg aggregateErr) Error() string {
-	if len(agg) == 0 {
-		// This should never happen, really.
-		return ""
-	}
-	if len(agg) == 1 {
-		return agg[0].Error()
-	}
-	result := fmt.Sprintf("[%s", agg[0].Error())
-	for i := 1; i < len(agg); i++ {
-		result += fmt.Sprintf(", %s", agg[i].Error())
-	}
-	result += "]"
-	return result
-}
-
-// REMOVED: aggregateErr.Errors
-
-// errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
-// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
-type errConfigurationInvalid []error
-
-var _ error = errConfigurationInvalid{}
-
-// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid.
-
-// newErrConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid.
-func newErrConfigurationInvalid(errs []error) error {
-	switch len(errs) {
-	case 0:
-		return nil
-	default:
-		return errConfigurationInvalid(errs)
-	}
-}
-
-// Error implements the error interface
-func (e errConfigurationInvalid) Error() string {
-	return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
-}
-
-// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
-// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
-// Callers can put the chain together however they want, but we'd recommend:
-// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath
-// ExplicitPath is special, because if a user specifically requests a certain file be used, an error is reported if this file is not present
-type clientConfigLoadingRules struct {
-	Precedence []string
-}
-
-// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
-// Load starts by running the MigrationRules and then
-// takes the loading rules and returns a Config object based on the following rules.
-// If the ExplicitPath is set, return the unmerged explicit file.
-// Otherwise, return a merged config based on the Precedence slice.
-// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
-// Read errors or files with non-deserializable content produce errors.
-// The first file to set a particular map key wins and the map key's value is never changed.
-// BUT, if you set a struct value that is NOT contained inside of a map, the value WILL be changed.
-// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
-// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
-// non-conflicting entries from the second file's "red-user" are discarded.
-// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
-// and only absolute file paths are returned.
-func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
- errlist := []error{}
-
- kubeConfigFiles := []string{}
-
- // REMOVED: explicit path support
- kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
-
- kubeconfigs := []*clientcmdConfig{}
- // read and cache the config files so that we only look at them once
- for _, filename := range kubeConfigFiles {
- if len(filename) == 0 {
- // no work to do
- continue
- }
-
- config, err := loadFromFile(filename)
- if os.IsNotExist(err) {
- // skip missing files
- continue
- }
- if err != nil {
- errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename))
- continue
- }
-
- kubeconfigs = append(kubeconfigs, config)
- }
-
- // first merge all of our maps
- mapConfig := clientcmdNewConfig()
- for _, kubeconfig := range kubeconfigs {
- mergo.MergeWithOverwrite(mapConfig, kubeconfig)
- }
-
- // merge all of the struct values in the reverse order so that priority is given correctly
- // errors are not added to the list the second time
- nonMapConfig := clientcmdNewConfig()
- for i := len(kubeconfigs) - 1; i >= 0; i-- {
- kubeconfig := kubeconfigs[i]
- mergo.MergeWithOverwrite(nonMapConfig, kubeconfig)
- }
-
- // since values are overwritten, but map values are not, we can merge the non-map config on top of the map config and
- // get the values we expect.
- config := clientcmdNewConfig()
- mergo.MergeWithOverwrite(config, mapConfig)
- mergo.MergeWithOverwrite(config, nonMapConfig)
-
- // REMOVED: Possibility to skip this.
- if err := resolveLocalPaths(config); err != nil {
- errlist = append(errlist, err)
- }
-
- return config, newAggregate(errlist)
-}
-
-// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
-// LoadFromFile takes a filename and deserializes the contents into a Config object
-func loadFromFile(filename string) (*clientcmdConfig, error) {
- kubeconfigBytes, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- config, err := load(kubeconfigBytes)
- if err != nil {
- return nil, err
- }
-
- // set LocationOfOrigin on every Cluster, User, and Context
- for key, obj := range config.AuthInfos {
- obj.LocationOfOrigin = filename
- config.AuthInfos[key] = obj
- }
- for key, obj := range config.Clusters {
- obj.LocationOfOrigin = filename
- config.Clusters[key] = obj
- }
- for key, obj := range config.Contexts {
- obj.LocationOfOrigin = filename
- config.Contexts[key] = obj
- }
-
- if config.AuthInfos == nil {
- config.AuthInfos = map[string]*clientcmdAuthInfo{}
- }
- if config.Clusters == nil {
- config.Clusters = map[string]*clientcmdCluster{}
- }
- if config.Contexts == nil {
- config.Contexts = map[string]*clientcmdContext{}
- }
-
- return config, nil
-}
-
-// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load
-// Load takes a byte slice and deserializes the contents into a Config object.
-// Encapsulates deserialization without assuming the source is a file.
-func load(data []byte) (*clientcmdConfig, error) {
- config := clientcmdNewConfig()
- // if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
- if len(data) == 0 {
- return config, nil
- }
- // Note: This does absolutely no kind/version checking or conversions.
- data, err := yaml.YAMLToJSON(data)
- if err != nil {
- return nil, err
- }
- if err := json.Unmarshal(data, config); err != nil {
- return nil, err
- }
- return config, nil
-}
-
-// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths.
-// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin;
-// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without
-// modification of its contents.
-func resolveLocalPaths(config *clientcmdConfig) error {
- for _, cluster := range config.Clusters {
- if len(cluster.LocationOfOrigin) == 0 {
- continue
- }
- base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
- if err != nil {
- return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
- }
-
- if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
- return err
- }
- }
- for _, authInfo := range config.AuthInfos {
- if len(authInfo.LocationOfOrigin) == 0 {
- continue
- }
- base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
- if err != nil {
- return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
- }
-
- if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences.
-func getClusterFileReferences(cluster *clientcmdCluster) []*string {
- return []*string{&cluster.CertificateAuthority}
-}
-
-// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences.
-func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string {
- return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey}
-}
-
-// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths.
-// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
-func resolvePaths(refs []*string, base string) error {
- for _, ref := range refs {
- // Don't resolve empty paths
- if len(*ref) > 0 {
- // Don't resolve absolute paths
- if !filepath.IsAbs(*ref) {
- *ref = filepath.Join(base, *ref)
- }
- }
- }
- return nil
-}
-
-// restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor.
-// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
-// object. Note that a RESTClient may require fields that are optional when initializing a Client.
-// A RESTClient created by this method is generic - it expects to operate on an API that follows
-// the Kubernetes conventions, but may not be the Kubernetes API.
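As a standalone illustration of the path resolution performed by resolvePaths above (the absolutize helper and the sample paths are hypothetical, not part of the vendored file):

package main

import (
	"fmt"
	"path/filepath"
)

// absolutize mirrors resolvePaths: empty and already-absolute entries are left
// alone; relative ones are joined to the directory of the originating file.
func absolutize(base string, refs []*string) {
	for _, ref := range refs {
		if len(*ref) > 0 && !filepath.IsAbs(*ref) {
			*ref = filepath.Join(base, *ref)
		}
	}
}

func main() {
	ca := "certs/ca.crt"
	absolutize("/home/user/.kube", []*string{&ca})
	fmt.Println(ca) // /home/user/.kube/certs/ca.crt
}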
-func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
- // REMOVED: Configurable GroupVersion, Codec
- // REMOVED: Configurable versionedAPIPath
- baseURL, err := defaultServerURLFor(config)
- if err != nil {
- return nil, nil, err
- }
-
- transport, err := transportFor(config)
- if err != nil {
- return nil, nil, err
- }
-
- var httpClient *http.Client
- if transport != http.DefaultTransport {
- httpClient = &http.Client{Transport: transport}
- }
-
- // REMOVED: Configurable QPS, Burst, ContentConfig
- // REMOVED: Actually returning a RESTClient object.
- return baseURL, httpClient, nil
-}
-
-// defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL.
-// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
-// to use with a Client at a given API version following the standard conventions for a
-// Kubernetes API.
-func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
- if host == "" {
- return nil, errors.Errorf("host must be a URL or a host:port pair")
- }
- base := host
- hostURL, err := url.Parse(base)
- if err != nil {
- return nil, err
- }
- if hostURL.Scheme == "" {
- scheme := "http://"
- if defaultTLS {
- scheme = "https://"
- }
- hostURL, err = url.Parse(scheme + base)
- if err != nil {
- return nil, err
- }
- if hostURL.Path != "" && hostURL.Path != "/" {
- return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
- }
- }
-
- // REMOVED: versionedAPIPath computation.
- return hostURL, nil
-}
-
-// defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor.
-// defaultServerURLFor is shared between IsConfigTransportTLS and RESTClientFor. It
-// requires Host and Version to be set prior to being called.
-func defaultServerURLFor(config *restConfig) (*url.URL, error) {
- // TODO: move the default to secure when the apiserver supports TLS by default
- // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
- hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0
- hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0
- defaultTLS := hasCA || hasCert || config.Insecure
- host := config.Host
- if host == "" {
- host = "localhost"
- }
-
- // REMOVED: Configurable APIPath, GroupVersion
- return defaultServerURL(host, defaultTLS)
-}
-
-// transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor.
-// TransportFor returns an http.RoundTripper that will provide the authentication
-// or transport level security defined by the provided Config. Will return the
-// default http.DefaultTransport if no special case behavior is needed.
-func transportFor(config *restConfig) (http.RoundTripper, error) {
- // REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support
- return transportNew(config)
-}
-
-// isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS.
-// IsConfigTransportTLS returns true if and only if the provided
-// config will result in a protected connection to the server when it
-// is passed to restclient.RESTClientFor(). Use to determine when to
-// send credentials over the wire.
-//
-// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
-// still possible.
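To make the scheme-defaulting step of defaultServerURL above concrete, a small hypothetical sketch (the helper name and sample host are illustrative only):

package main

import (
	"fmt"
	"net/url"
)

// withDefaultScheme mimics the branch in defaultServerURL above: a host given
// without a scheme gets https:// when TLS material is configured, else http://.
func withDefaultScheme(host string, defaultTLS bool) (*url.URL, error) {
	u, err := url.Parse(host)
	if err != nil {
		return nil, err
	}
	if u.Scheme == "" {
		scheme := "http://"
		if defaultTLS {
			scheme = "https://"
		}
		return url.Parse(scheme + host)
	}
	return u, nil
}

func main() {
	u, _ := withDefaultScheme("registry.example.com", true)
	fmt.Println(u) // https://registry.example.com
}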
-func isConfigTransportTLS(config restConfig) bool { - baseURL, err := defaultServerURLFor(&config) - if err != nil { - return false - } - return baseURL.Scheme == "https" -} - -// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New. -// New returns an http.RoundTripper that will provide the authentication -// or transport level security defined by the provided Config. -func transportNew(config *restConfig) (http.RoundTripper, error) { - // REMOVED: custom config.Transport support. - // Set transport level security - - var ( - rt http.RoundTripper - err error - ) - - rt, err = tlsCacheGet(config) - if err != nil { - return nil, err - } - - // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains. - if len(config.Username) != 0 && len(config.BearerToken) != 0 { - return nil, errors.Errorf("username/password or bearer token may be set, but not both") - } - - return rt, nil -} - -// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR. -// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if -// no matching CIDRs are found -func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { - // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it - noProxyEnv := os.Getenv("NO_PROXY") - noProxyRules := strings.Split(noProxyEnv, ",") - - cidrs := []*net.IPNet{} - for _, noProxyRule := range noProxyRules { - _, cidr, _ := net.ParseCIDR(noProxyRule) - if cidr != nil { - cidrs = append(cidrs, cidr) - } - } - - if len(cidrs) == 0 { - return delegate - } - - return func(req *http.Request) (*url.URL, error) { - host := req.URL.Host - // for some urls, the Host is already the host, not the host:port - if net.ParseIP(host) == nil { - var err error - host, _, err = net.SplitHostPort(req.URL.Host) - if err != nil { - return delegate(req) - } - } - - ip := net.ParseIP(host) - if ip == nil { - return delegate(req) - } - - for _, cidr := range cidrs { - if cidr.Contains(ip) { - return nil, nil - } - } - - return delegate(req) - } -} - -// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get. -func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { - // REMOVED: any actual caching - - // Get the TLS options for this client config - tlsConfig, err := tlsConfigFor(config) - if err != nil { - return nil, err - } - // The options didn't require a custom TLS config - if tlsConfig == nil { - return http.DefaultTransport, nil - } - - // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here. - t := &http.Transport{ - // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings - // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY - Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment), - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - } - // Allow clients to disable http2 if needed. 
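// Note on the check below: http2.ConfigureTransport runs only when
// DISABLE_HTTP2 is unset or empty, so setting DISABLE_HTTP2 to any value
// keeps this transport on HTTP/1.x.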
- if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 { - _ = http2.ConfigureTransport(t) - } - return t, nil -} - -// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor. -// TLSConfigFor returns a tls.Config that will provide the transport level security defined -// by the provided Config. Will return nil if no transport level security is requested. -func tlsConfigFor(c *restConfig) (*tls.Config, error) { - if !(c.HasCA() || c.HasCertAuth() || c.Insecure) { - return nil, nil - } - if c.HasCA() && c.Insecure { - return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed") - } - if err := loadTLSFiles(c); err != nil { - return nil, err - } - - tlsConfig := &tls.Config{ - // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) - MinVersion: tls.VersionTLS10, - InsecureSkipVerify: c.Insecure, - } - - if c.HasCA() { - tlsConfig.RootCAs = rootCertPool(c.CAData) - } - - if c.HasCertAuth() { - cert, err := tls.X509KeyPair(c.CertData, c.KeyData) - if err != nil { - return nil, err - } - tlsConfig.Certificates = []tls.Certificate{cert} - } - - return tlsConfig, nil -} - -// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles. -// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, -// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are -// either populated or were empty to start. -func loadTLSFiles(c *restConfig) error { - var err error - c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile) - if err != nil { - return err - } - - c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile) - if err != nil { - return err - } - - c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) - if err != nil { - return err - } - return nil -} - -// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile. -// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, -// or an error if an error occurred reading the file -func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { - if len(data) > 0 { - return data, nil - } - if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) - if err != nil { - return []byte{}, err - } - return fileData, nil - } - return nil, nil -} - -// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool. -// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". -// When caData is not empty, it will be the ONLY information used in the CertPool. -func rootCertPool(caData []byte) *x509.CertPool { - // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go - // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values - // It doesn't allow trusting either/or, but hopefully that won't be an issue - if len(caData) == 0 { - return nil - } - - // if we have caData, use it - certPool := x509.NewCertPool() - certPool.AppendCertsFromPEM(caData) - return certPool -} - -// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA. -// HasCA returns whether the configuration has a certificate authority or not. 
-func (c *restConfig) HasCA() bool {
- return len(c.CAData) > 0 || len(c.CAFile) > 0
-}
-
-// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth.
-// HasCertAuth returns whether the configuration has certificate authentication or not.
-func (c *restConfig) HasCertAuth() bool {
- return len(c.CertData) != 0 || len(c.CertFile) != 0
-}
-
-// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config.
-// Config holds the information needed to connect to remote kubernetes clusters as a given user
-// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
-type clientcmdConfig struct {
- // Clusters is a map of referenceable names to cluster configs
- Clusters clustersMap `json:"clusters"`
- // AuthInfos is a map of referenceable names to user configs
- AuthInfos authInfosMap `json:"users"`
- // Contexts is a map of referenceable names to context configs
- Contexts contextsMap `json:"contexts"`
- // CurrentContext is the name of the context that you would like to use by default
- CurrentContext string `json:"current-context"`
-}
-
-type clustersMap map[string]*clientcmdCluster
-
-func (m *clustersMap) UnmarshalJSON(data []byte) error {
- var a []v1NamedCluster
- if err := json.Unmarshal(data, &a); err != nil {
- return err
- }
- for _, e := range a {
- cluster := e.Cluster // Allocates a new instance in each iteration
- (*m)[e.Name] = &cluster
- }
- return nil
-}
-
-type authInfosMap map[string]*clientcmdAuthInfo
-
-func (m *authInfosMap) UnmarshalJSON(data []byte) error {
- var a []v1NamedAuthInfo
- if err := json.Unmarshal(data, &a); err != nil {
- return err
- }
- for _, e := range a {
- authInfo := e.AuthInfo // Allocates a new instance in each iteration
- (*m)[e.Name] = &authInfo
- }
- return nil
-}
-
-type contextsMap map[string]*clientcmdContext
-
-func (m *contextsMap) UnmarshalJSON(data []byte) error {
- var a []v1NamedContext
- if err := json.Unmarshal(data, &a); err != nil {
- return err
- }
- for _, e := range a {
- context := e.Context // Allocates a new instance in each iteration
- (*m)[e.Name] = &context
- }
- return nil
-}
-
-// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig.
-// NewConfig is a convenience function that returns a new Config object with non-nil maps
-func clientcmdNewConfig() *clientcmdConfig {
- return &clientcmdConfig{
- Clusters: make(map[string]*clientcmdCluster),
- AuthInfos: make(map[string]*clientcmdAuthInfo),
- Contexts: make(map[string]*clientcmdContext),
- }
-}
-
-// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
-// Cluster contains information about how to communicate with a kubernetes cluster
-type clientcmdCluster struct {
- // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
- LocationOfOrigin string
- // Server is the address of the kubernetes cluster (https://hostname:port).
- Server string `json:"server"`
- // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
- InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
- // CertificateAuthority is the path to a cert file for the certificate authority.
- CertificateAuthority string `json:"certificate-authority,omitempty"`
- // CertificateAuthorityData contains PEM-encoded certificate authority certificates.
Overrides CertificateAuthority
- CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
-}
-
-// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
-// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
-type clientcmdAuthInfo struct {
- // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
- LocationOfOrigin string
- // ClientCertificate is the path to a client cert file for TLS.
- ClientCertificate string `json:"client-certificate,omitempty"`
- // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
- ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
- // ClientKey is the path to a client key file for TLS.
- ClientKey string `json:"client-key,omitempty"`
- // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
- ClientKeyData []byte `json:"client-key-data,omitempty"`
- // Token is the bearer token for authentication to the kubernetes cluster.
- Token string `json:"token,omitempty"`
- // Username is the username for basic authentication to the kubernetes cluster.
- Username string `json:"username,omitempty"`
- // Password is the password for basic authentication to the kubernetes cluster.
- Password string `json:"password,omitempty"`
-}
-
-// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
-// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
-type clientcmdContext struct {
- // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
- LocationOfOrigin string
- // Cluster is the name of the cluster for this context
- Cluster string `json:"cluster"`
- // AuthInfo is the name of the authInfo for this context
- AuthInfo string `json:"user"`
- // Namespace is the default namespace to use on unspecified requests
- Namespace string `json:"namespace,omitempty"`
-}
-
-// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
-// NamedCluster relates nicknames to cluster information
-type v1NamedCluster struct {
- // Name is the nickname for this Cluster
- Name string `json:"name"`
- // Cluster holds the cluster information
- Cluster clientcmdCluster `json:"cluster"`
-}
-
-// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
-// NamedContext relates nicknames to context information
-type v1NamedContext struct {
- // Name is the nickname for this Context
- Name string `json:"name"`
- // Context holds the context information
- Context clientcmdContext `json:"context"`
-}
-
-// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.
-// NamedAuthInfo relates nicknames to auth information -type v1NamedAuthInfo struct { - // Name is the nickname for this AuthInfo - Name string `json:"name"` - // AuthInfo holds the auth information - AuthInfo clientcmdAuthInfo `json:"user"` -} diff --git a/vendor/github.com/containers/image/v4/openshift/openshift.go b/vendor/github.com/containers/image/v4/openshift/openshift.go deleted file mode 100644 index 51fff6269..000000000 --- a/vendor/github.com/containers/image/v4/openshift/openshift.go +++ /dev/null @@ -1,562 +0,0 @@ -package openshift - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/containers/image/v4/docker" - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/types" - "github.com/containers/image/v4/version" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// openshiftClient is configuration for dealing with a single image stream, for reading or writing. -type openshiftClient struct { - ref openshiftReference - baseURL *url.URL - // Values from Kubernetes configuration - httpClient *http.Client - bearerToken string // "" if not used - username string // "" if not used - password string // if username != "" -} - -// newOpenshiftClient creates a new openshiftClient for the specified reference. -func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { - // We have already done this parsing in ParseReference, but thrown away - // httpClient. So, parse again. - // (We could also rework/split restClientFor to "get base URL" to be done - // in ParseReference, and "get httpClient" to be done here. But until/unless - // we support non-default clusters, this is good enough.) - - // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client. - cmdConfig := defaultClientConfig() - logrus.Debugf("cmdConfig: %#v", cmdConfig) - restConfig, err := cmdConfig.ClientConfig() - if err != nil { - return nil, err - } - // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.) - logrus.Debugf("restConfig: %#v", restConfig) - baseURL, httpClient, err := restClientFor(restConfig) - if err != nil { - return nil, err - } - logrus.Debugf("URL: %#v", *baseURL) - - if httpClient == nil { - httpClient = http.DefaultClient - } - - return &openshiftClient{ - ref: ref, - baseURL: baseURL, - httpClient: httpClient, - bearerToken: restConfig.BearerToken, - username: restConfig.Username, - password: restConfig.Password, - }, nil -} - -// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. 
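The bearer-token-over-basic-auth precedence used by doRequest below can be sketched in isolation (the setAuth helper and the request URL are hypothetical placeholders):

package main

import (
	"fmt"
	"net/http"
)

// setAuth applies the same precedence as doRequest: a bearer token, when set,
// wins over username/password basic auth.
func setAuth(req *http.Request, bearer, user, pass string) {
	if len(bearer) != 0 {
		req.Header.Set("Authorization", "Bearer "+bearer)
	} else if len(user) != 0 {
		req.SetBasicAuth(user, pass)
	}
}

func main() {
	req, _ := http.NewRequest("GET", "https://example.invalid/", nil)
	setAuth(req, "token123", "alice", "secret")
	fmt.Println(req.Header.Get("Authorization")) // Bearer token123
}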
-func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { - url := *c.baseURL - url.Path = path - var requestBodyReader io.Reader - if requestBody != nil { - logrus.Debugf("Will send body: %s", requestBody) - requestBodyReader = bytes.NewReader(requestBody) - } - req, err := http.NewRequest(method, url.String(), requestBodyReader) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - - if len(c.bearerToken) != 0 { - req.Header.Set("Authorization", "Bearer "+c.bearerToken) - } else if len(c.username) != 0 { - req.SetBasicAuth(c.username, c.password) - } - req.Header.Set("Accept", "application/json, */*") - req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version)) - if requestBody != nil { - req.Header.Set("Content-Type", "application/json") - } - - logrus.Debugf("%s %s", method, url.String()) - res, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - logrus.Debugf("Got body: %s", body) - // FIXME: Just throwing this useful information away only to try to guess later... - logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type")) - - var status status - statusValid := false - if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 { - statusValid = true - } - - switch { - case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient. - if statusValid && status.Status != "Success" { - return nil, errors.New(status.Message) - } - case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent: - // OK. - default: - if statusValid { - return nil, errors.New(status.Message) - } - return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body)) - } - - return body, nil -} - -// getImage loads the specified image object. -func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) { - // FIXME: validate components per validation.IsValidPathSegmentName? - path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName) - body, err := c.doRequest(ctx, "GET", path, nil) - if err != nil { - return nil, err - } - // Note: This does absolutely no kind/version checking or conversions. - var isi imageStreamImage - if err := json.Unmarshal(body, &isi); err != nil { - return nil, err - } - return &isi.Image, nil -} - -// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; -// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. 
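A self-contained sketch of the host substitution that convertDockerImageReference below performs (the function name and sample values are hypothetical):

package main

import (
	"fmt"
	"strings"
)

// rewriteRegistry keeps everything after the first "/" of the reference and
// swaps in an externally reachable registry host, as the comment above describes.
func rewriteRegistry(ref, externalHost string) (string, error) {
	parts := strings.SplitN(ref, "/", 2)
	if len(parts) != 2 {
		return "", fmt.Errorf("invalid format of docker reference %s: missing '/'", ref)
	}
	return externalHost + "/" + parts[1], nil
}

func main() {
	out, _ := rewriteRegistry("172.30.0.1:5000/myns/mystream@sha256:0123", "registry.example.com")
	fmt.Println(out) // registry.example.com/myns/mystream@sha256:0123
}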
-func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { - parts := strings.SplitN(ref, "/", 2) - if len(parts) != 2 { - return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref) - } - return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil -} - -type openshiftImageSource struct { - client *openshiftClient - // Values specific to this image - sys *types.SystemContext - // State - docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet - imageStreamImageName string // Resolved image identifier, or "" if not known yet -} - -// newImageSource creates a new ImageSource for the specified reference. -// The caller must call .Close() on the returned ImageSource. -func newImageSource(sys *types.SystemContext, ref openshiftReference) (types.ImageSource, error) { - client, err := newOpenshiftClient(ref) - if err != nil { - return nil, err - } - - return &openshiftImageSource{ - client: client, - sys: sys, - }, nil -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *openshiftImageSource) Reference() types.ImageReference { - return s.client.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *openshiftImageSource) Close() error { - if s.docker != nil { - err := s.docker.Close() - s.docker = nil - - return err - } - - return nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, "", err - } - return s.docker.GetManifest(ctx, instanceDigest) -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { - return false -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, 0, err - } - return s.docker.GetBlob(ctx, info, cache) -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). 
-func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - var imageName string - if instanceDigest == nil { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, err - } - imageName = s.imageStreamImageName - } else { - imageName = instanceDigest.String() - } - image, err := s.client.getImage(ctx, imageName) - if err != nil { - return nil, err - } - var sigs [][]byte - for _, sig := range image.Signatures { - if sig.Type == imageSignatureTypeAtomic { - sigs = append(sigs, sig.Content) - } - } - return sigs, nil -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} - -// ensureImageIsResolved sets up s.docker and s.imageStreamImageName -func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { - if s.docker != nil { - return nil - } - - // FIXME: validate components per validation.IsValidPathSegmentName? - path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) - body, err := s.client.doRequest(ctx, "GET", path, nil) - if err != nil { - return err - } - // Note: This does absolutely no kind/version checking or conversions. - var is imageStream - if err := json.Unmarshal(body, &is); err != nil { - return err - } - var te *tagEvent - for _, tag := range is.Status.Tags { - if tag.Tag != s.client.ref.dockerReference.Tag() { - continue - } - if len(tag.Items) > 0 { - te = &tag.Items[0] - break - } - } - if te == nil { - return errors.Errorf("No matching tag found") - } - logrus.Debugf("tag event %#v", te) - dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) - if err != nil { - return err - } - logrus.Debugf("Resolved reference %#v", dockerRefString) - dockerRef, err := docker.ParseReference("//" + dockerRefString) - if err != nil { - return err - } - d, err := dockerRef.NewImageSource(ctx, s.sys) - if err != nil { - return err - } - s.docker = d - s.imageStreamImageName = te.Image - return nil -} - -type openshiftImageDestination struct { - client *openshiftClient - docker types.ImageDestination // The Docker Registry endpoint - // State - imageStreamImageName string // "" if not yet known -} - -// newImageDestination creates a new ImageDestination for the specified reference. -func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) { - client, err := newOpenshiftClient(ref) - if err != nil { - return nil, err - } - - // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match, - // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know - // the manifest digest at this point. - dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag()) - dockerRef, err := docker.ParseReference(dockerRefString) - if err != nil { - return nil, err - } - docker, err := dockerRef.NewImageDestination(ctx, sys) - if err != nil { - return nil, err - } - - return &openshiftImageDestination{ - client: client, - docker: docker, - }, nil -} - -// Reference returns the reference used to set up this destination. 
Note that this should directly correspond to user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (d *openshiftImageDestination) Reference() types.ImageReference {
- return d.client.ref
-}
-
-// Close removes resources associated with an initialized ImageDestination, if any.
-func (d *openshiftImageDestination) Close() error {
- return d.docker.Close()
-}
-
-func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
- return d.docker.SupportedManifestMIMETypes()
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *openshiftImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.Compress
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
- return true
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
-func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
- return d.docker.IgnoresEmbeddedDockerReference()
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
-// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
-}
-
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
-// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte) error {
- manifestDigest, err := manifest.Digest(m)
- if err != nil {
- return err
- }
- d.imageStreamImageName = manifestDigest.String()
-
- return d.docker.PutManifest(ctx, m)
-}
-
-func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
- if d.imageStreamImageName == "" {
- return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
- }
- // Because image signatures are a shared resource in Atomic Registry, the default upload
- // always adds signatures. Eventually we should also allow removing signatures.
-
- if len(signatures) == 0 {
- return nil // No need to even read the old state.
- }
-
- image, err := d.client.getImage(ctx, d.imageStreamImageName)
- if err != nil {
- return err
- }
- existingSigNames := map[string]struct{}{}
- for _, sig := range image.Signatures {
- existingSigNames[sig.objectMeta.Name] = struct{}{}
- }
-
-sigExists:
- for _, newSig := range signatures {
- for _, existingSig := range image.Signatures {
- if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
- continue sigExists
- }
- }
-
- // The API expects us to invent a new unique name. This is racy, but hopefully good enough.
- var signatureName string
- for {
- randBytes := make([]byte, 16)
- n, err := rand.Read(randBytes)
- if err != nil || n != 16 {
- return errors.Wrapf(err, "Error generating random signature len %d", n)
- }
- signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
- if _, ok := existingSigNames[signatureName]; !ok {
- break
- }
- }
- // Note: This does absolutely no kind/version checking or conversions.
- sig := imageSignature{
- typeMeta: typeMeta{
- Kind: "ImageSignature",
- APIVersion: "v1",
- },
- objectMeta: objectMeta{Name: signatureName},
- Type: imageSignatureTypeAtomic,
- Content: newSig,
- }
- body, err := json.Marshal(sig)
- if err != nil {
- return err
- }
- if _, err := d.client.doRequest(ctx, "POST", "/oapi/v1/imagesignatures", body); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e.
rollback is allowed but not guaranteed) -func (d *openshiftImageDestination) Commit(ctx context.Context) error { - return d.docker.Commit(ctx) -} - -// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies. -type imageStream struct { - Status imageStreamStatus `json:"status,omitempty"` -} -type imageStreamStatus struct { - DockerImageRepository string `json:"dockerImageRepository"` - Tags []namedTagEventList `json:"tags,omitempty"` -} -type namedTagEventList struct { - Tag string `json:"tag"` - Items []tagEvent `json:"items"` -} -type tagEvent struct { - DockerImageReference string `json:"dockerImageReference"` - Image string `json:"image"` -} -type imageStreamImage struct { - Image image `json:"image"` -} -type image struct { - objectMeta `json:"metadata,omitempty"` - DockerImageReference string `json:"dockerImageReference,omitempty"` - // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"` - DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"` - DockerImageManifest string `json:"dockerImageManifest,omitempty"` - // DockerImageLayers []ImageLayer `json:"dockerImageLayers"` - Signatures []imageSignature `json:"signatures,omitempty"` -} - -const imageSignatureTypeAtomic string = "atomic" - -type imageSignature struct { - typeMeta `json:",inline"` - objectMeta `json:"metadata,omitempty"` - Type string `json:"type"` - Content []byte `json:"content"` - // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` - // ImageIdentity string `json:"imageIdentity,omitempty"` - // SignedClaims map[string]string `json:"signedClaims,omitempty"` - // Created *unversioned.Time `json:"created,omitempty"` - // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"` - // IssuedTo SignatureSubject `json:"issuedTo,omitempty"` -} -type typeMeta struct { - Kind string `json:"kind,omitempty"` - APIVersion string `json:"apiVersion,omitempty"` -} -type objectMeta struct { - Name string `json:"name,omitempty"` - GenerateName string `json:"generateName,omitempty"` - Namespace string `json:"namespace,omitempty"` - SelfLink string `json:"selfLink,omitempty"` - ResourceVersion string `json:"resourceVersion,omitempty"` - Generation int64 `json:"generation,omitempty"` - DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Annotations map[string]string `json:"annotations,omitempty"` -} - -// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status -type status struct { - Status string `json:"status,omitempty"` - Message string `json:"message,omitempty"` - // Reason StatusReason `json:"reason,omitempty"` - // Details *StatusDetails `json:"details,omitempty"` - Code int32 `json:"code,omitempty"` -} diff --git a/vendor/github.com/containers/image/v4/openshift/openshift_transport.go b/vendor/github.com/containers/image/v4/openshift/openshift_transport.go deleted file mode 100644 index f00c94561..000000000 --- a/vendor/github.com/containers/image/v4/openshift/openshift_transport.go +++ /dev/null @@ -1,157 +0,0 @@ -package openshift - -import ( - "context" - "fmt" - "regexp" - "strings" - - "github.com/containers/image/v4/docker/policyconfiguration" - "github.com/containers/image/v4/docker/reference" - genericImage "github.com/containers/image/v4/image" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" - "github.com/pkg/errors" -) - -func init() { - 
transports.Register(Transport)
-}
-
-// Transport is an ImageTransport for OpenShift registry-hosted images.
-var Transport = openshiftTransport{}
-
-type openshiftTransport struct{}
-
-func (t openshiftTransport) Name() string {
- return "atomic"
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
-func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) {
- return ParseReference(reference)
-}
-
-// Note that imageNameRegexp is namespace/stream:tag; this
-// is HOSTNAME/namespace/stream:tag or parent prefixes.
-// Keep this in sync with imageNameRegexp!
-var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
-
-// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
-// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
-// scope passed to this function will not be "", that value is always allowed.
-func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error {
- if scopeRegexp.FindStringIndex(scope) == nil {
- return errors.Errorf("Invalid scope name %s", scope)
- }
- return nil
-}
-
-// openshiftReference is an ImageReference for OpenShift images.
-type openshiftReference struct {
- dockerReference reference.NamedTagged
- namespace string // Computed from dockerReference in advance.
- stream string // Computed from dockerReference in advance.
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference.
-func ParseReference(ref string) (types.ImageReference, error) {
- r, err := reference.ParseNormalizedNamed(ref)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to parse image reference %q", ref)
- }
- tagged, ok := r.(reference.NamedTagged)
- if !ok {
- return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref)
- }
- return NewReference(tagged)
-}
-
-// NewReference returns an OpenShift reference for a reference.NamedTagged
-func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
- r := strings.SplitN(reference.Path(dockerRef), "/", 3)
- if len(r) != 2 {
- return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'",
- reference.FamiliarString(dockerRef))
- }
- return openshiftReference{
- namespace: r[0],
- stream: r[1],
- dockerReference: dockerRef,
- }, nil
-}
-
-func (ref openshiftReference) Transport() types.ImageTransport {
- return Transport
-}
-
-// StringWithinTransport returns a string representation of the reference, which MUST be such that
-// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
-// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
-// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
-func (ref openshiftReference) StringWithinTransport() string {
- return reference.FamiliarString(ref.dockerReference)
-}
-
-// DockerReference returns a Docker reference associated with this reference
-// (fully explicit, i.e.
!reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref openshiftReference) DockerReference() reference.Named { - return ref.dockerReference -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref openshiftReference) PolicyConfigurationIdentity() string { - res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference) - if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference. - panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) - } - return res -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref openshiftReference) PolicyConfigurationNamespaces() []string { - return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference) -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(sys, ref) - if err != nil { - return nil, err - } - return genericImage.FromSource(ctx, sys, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref openshiftReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(sys, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, sys, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. 
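To make the 'hostname/namespace/stream:tag' format enforced by NewReference above concrete, a hypothetical standalone sketch (helper name and sample values are illustrative only):

package main

import (
	"fmt"
	"strings"
)

// splitNamespaceStream mirrors NewReference's check: the repository path under
// the registry host must be exactly "namespace/stream", nothing deeper.
func splitNamespaceStream(repoPath string) (namespace, stream string, err error) {
	parts := strings.SplitN(repoPath, "/", 3)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("expected namespace/stream, got %q", repoPath)
	}
	return parts[0], parts[1], nil
}

func main() {
	ns, st, _ := splitNamespaceStream("myproject/myapp")
	fmt.Println(ns, st) // myproject myapp
}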
-func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for atomic: images")
-}
diff --git a/vendor/github.com/containers/image/v4/ostree/ostree_dest.go b/vendor/github.com/containers/image/v4/ostree/ostree_dest.go
deleted file mode 100644
index 9e1436e29..000000000
--- a/vendor/github.com/containers/image/v4/ostree/ostree_dest.go
+++ /dev/null
@@ -1,504 +0,0 @@
-// +build containers_image_ostree
-
-package ostree
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "strconv"
- "strings"
- "syscall"
- "time"
- "unsafe"
-
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
- "github.com/containers/storage/pkg/archive"
- "github.com/klauspost/pgzip"
- "github.com/opencontainers/go-digest"
- selinux "github.com/opencontainers/selinux/go-selinux"
- "github.com/ostreedev/ostree-go/pkg/otbuiltin"
- "github.com/pkg/errors"
- "github.com/vbatts/tar-split/tar/asm"
- "github.com/vbatts/tar-split/tar/storage"
-)
-
-// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux
-// #include <glib.h>
-// #include <glib-object.h>
-// #include <gio/gio.h>
-// #include <stdlib.h>
-// #include <ostree.h>
-// #include <gio/ginputstream.h>
-// #include <selinux/selinux.h>
-// #include <selinux/label.h>
-import "C"
-
-type blobToImport struct {
- Size int64
- Digest digest.Digest
- BlobPath string
-}
-
-type descriptor struct {
- Size int64 `json:"size"`
- Digest digest.Digest `json:"digest"`
-}
-
-type fsLayersSchema1 struct {
- BlobSum digest.Digest `json:"blobSum"`
-}
-
-type manifestSchema struct {
- LayersDescriptors []descriptor `json:"layers"`
- FSLayers []fsLayersSchema1 `json:"fsLayers"`
-}
-
-type ostreeImageDestination struct {
- ref ostreeReference
- manifest string
- schema manifestSchema
- tmpDirPath string
- blobs map[string]*blobToImport
- digest digest.Digest
- signaturesLen int
- repo *C.struct_OstreeRepo
-}
-
-// newImageDestination returns an ImageDestination for writing to an existing ostree.
-func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) {
- tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
- if err := ensureDirectoryExists(tmpDirPath); err != nil {
- return nil, err
- }
- return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil
-}
-
-// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (d *ostreeImageDestination) Reference() types.ImageReference {
- return d.ref
-}
-
-// Close removes resources associated with an initialized ImageDestination, if any.
-func (d *ostreeImageDestination) Close() error {
- if d.repo != nil {
- C.g_object_unref(C.gpointer(d.repo))
- }
- return os.RemoveAll(d.tmpDirPath)
-}
-
-func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string {
- return []string{
- manifest.DockerV2Schema2MediaType,
- }
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *ostreeImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved when written to this destination.
-func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression { - return types.PreserveOriginal -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *ostreeImageDestination) MustMatchRuntimeOS() bool { - return true -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool { - return false // N/A, DockerReference() returns nil. -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// inputInfo.MediaType describes the blob format, if known. -// May update cache. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") - if err != nil { - return types.BlobInfo{}, err - } - - blobPath := filepath.Join(tmpDir, "content") - blobFile, err := os.Create(blobPath) - if err != nil { - return types.BlobInfo{}, err - } - defer blobFile.Close() - - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). 
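// (The io.TeeReader above feeds every byte copied out of stream through
// digester.Hash() as a side effect, so the single io.Copy below both writes
// the blob to disk and computes its canonical digest in one pass.)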
-    size, err := io.Copy(blobFile, tee)
-    if err != nil {
-        return types.BlobInfo{}, err
-    }
-    computedDigest := digester.Digest()
-    if inputInfo.Size != -1 && size != inputInfo.Size {
-        return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
-    }
-    if err := blobFile.Sync(); err != nil {
-        return types.BlobInfo{}, err
-    }
-
-    hash := computedDigest.Hex()
-    d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath}
-    return types.BlobInfo{Digest: computedDigest, Size: size}, nil
-}
-
-func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
-    entries, err := ioutil.ReadDir(dir)
-    if err != nil {
-        return err
-    }
-
-    for _, info := range entries {
-        fullpath := filepath.Join(dir, info.Name())
-        if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
-            if err := os.Remove(fullpath); err != nil {
-                return err
-            }
-            continue
-        }
-
-        if selinuxHnd != nil {
-            relPath, err := filepath.Rel(root, fullpath)
-            if err != nil {
-                return err
-            }
-            // Handle /exports/hostfs as a special case. Files under this directory are copied to the host,
-            // thus we benefit from maintaining the same SELinux label they would have on the host as we could
-            // use hard links instead of copying the files.
-            relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/"))
-
-            relPathC := C.CString(relPath)
-            defer C.free(unsafe.Pointer(relPathC))
-            var context *C.char
-
-            res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm))
-            if int(res) < 0 && err != syscall.ENOENT {
-                return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath)
-            }
-            if int(res) == 0 {
-                defer C.freecon(context)
-                fullpathC := C.CString(fullpath)
-                defer C.free(unsafe.Pointer(fullpathC))
-                res, err = C.lsetfilecon_raw(fullpathC, context)
-                if int(res) < 0 {
-                    return errors.Wrapf(err, "cannot setfilecon_raw %s to %s", fullpath, C.GoString(context))
-                }
-            }
-        }
-
-        if info.IsDir() {
-            if usermode {
-                if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
-                    return err
-                }
-            }
-            err = fixFiles(selinuxHnd, root, fullpath, usermode)
-            if err != nil {
-                return err
-            }
-        } else if usermode && (info.Mode().IsRegular()) {
-            if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
-                return err
-            }
-        }
-    }
-
-    return nil
-}
-
-func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error {
-    opts := otbuiltin.NewCommitOptions()
-    opts.AddMetadataString = metadata
-    opts.Timestamp = time.Now()
-    // OCI layers have no parent OSTree commit
-    opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000"
-    _, err := repo.Commit(root, branch, opts)
-    return err
-}
-
-func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) {
-    mfz := pgzip.NewWriter(output)
-    defer mfz.Close()
-    metaPacker := storage.NewJSONPacker(mfz)
-
-    stream, err := os.OpenFile(file, os.O_RDONLY, 0)
-    if err != nil {
-        return "", -1, err
-    }
-    defer stream.Close()
-
-    gzReader, err := archive.DecompressStream(stream)
-    if err != nil {
-        return "", -1, err
-    }
-    defer gzReader.Close()
-
-    its, err := asm.NewInputTarStream(gzReader, metaPacker, nil)
-    if err != nil {
-        return "", -1, err
-    }
-
-    digester := digest.Canonical.Digester()
-
-    written, err := io.Copy(digester.Hash(), its)
-    if err != nil {
-        return "", -1, err
-    }
-
-    return digester.Digest(), written, nil
-}
-
-func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
-    // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
-
-    ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
-    destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
-    if err := ensureDirectoryExists(destinationPath); err != nil {
-        return err
-    }
-    defer func() {
-        os.Remove(blob.BlobPath)
-        os.RemoveAll(destinationPath)
-    }()
-
-    var tarSplitOutput bytes.Buffer
-    uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath)
-    if err != nil {
-        return err
-    }
-
-    if os.Getuid() == 0 {
-        if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
-            return err
-        }
-        if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil {
-            return err
-        }
-    } else {
-        os.MkdirAll(destinationPath, 0755)
-        if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil {
-            return err
-        }
-
-        if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil {
-            return err
-        }
-    }
-    return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size),
-        fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize),
-        fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()),
-        fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))})
-
-}
-
-func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
-    ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
-    destinationPath := filepath.Dir(blob.BlobPath)
-
-    return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
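(Aside: the contract documented above, for the method that follows, amounts to a three-way outcome. A hedged sketch of how a copy pipeline might consume it, written against the PutBlob and TryReusingBlob signatures visible in this file; reuseOrUpload and its arguments are illustrative stand-ins, not names from this patch:)

    // reuseOrUpload returns the effective blob info, uploading only when
    // the destination could not reuse an existing copy of the blob.
    func reuseOrUpload(ctx context.Context, dest types.ImageDestination, info types.BlobInfo, cache types.BlobInfoCache, stream io.Reader) (types.BlobInfo, error) {
        reused, cached, err := dest.TryReusingBlob(ctx, info, cache, true)
        if err != nil {
            return types.BlobInfo{}, err // non-nil only on unexpected failures
        }
        if reused {
            return cached, nil // digest and size are guaranteed to be set
        }
        return dest.PutBlob(ctx, stream, info, cache, false) // isConfig=false for layers
    }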
-func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
-    if d.repo == nil {
-        repo, err := openRepo(d.ref.repo)
-        if err != nil {
-            return false, types.BlobInfo{}, err
-        }
-        d.repo = repo
-    }
-    branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
-
-    found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
-    if err != nil || !found {
-        return found, types.BlobInfo{}, err
-    }
-
-    found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
-    if err != nil || !found {
-        return found, types.BlobInfo{}, err
-    }
-
-    found, data, err = readMetadata(d.repo, branch, "docker.size")
-    if err != nil || !found {
-        return found, types.BlobInfo{}, err
-    }
-
-    size, err := strconv.ParseInt(data, 10, 64)
-    if err != nil {
-        return false, types.BlobInfo{}, err
-    }
-
-    return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
-}
-
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
-// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error {
-    d.manifest = string(manifestBlob)
-
-    if err := json.Unmarshal(manifestBlob, &d.schema); err != nil {
-        return err
-    }
-
-    manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath())
-    if err := ensureParentDirectoryExists(manifestPath); err != nil {
-        return err
-    }
-
-    digest, err := manifest.Digest(manifestBlob)
-    if err != nil {
-        return err
-    }
-    d.digest = digest
-
-    return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
-}
-
-func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
-    path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0))
-    if err := ensureParentDirectoryExists(path); err != nil {
-        return err
-    }
-
-    for i, sig := range signatures {
-        signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
-        if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil {
-            return err
-        }
-    }
-    d.signaturesLen = len(signatures)
-    return nil
-}
-
-func (d *ostreeImageDestination) Commit(ctx context.Context) error {
-    runtime.LockOSThread()
-    defer runtime.UnlockOSThread()
-
-    repo, err := otbuiltin.OpenRepo(d.ref.repo)
-    if err != nil {
-        return err
-    }
-
-    _, err = repo.PrepareTransaction()
-    if err != nil {
-        return err
-    }
-
-    var selinuxHnd *C.struct_selabel_handle
-
-    if os.Getuid() == 0 && selinux.GetEnabled() {
-        selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
-        if selinuxHnd == nil {
-            return errors.Wrapf(err, "cannot open the SELinux DB")
-        }
-
-        defer C.selabel_close(selinuxHnd)
-    }
-
-    checkLayer := func(hash string) error {
-        blob := d.blobs[hash]
-        // if the blob is not present in d.blobs then it is already stored in OSTree,
-        // and we don't need to import it.
-        if blob == nil {
-            return nil
-        }
-        err := d.importBlob(selinuxHnd, repo, blob)
-        if err != nil {
-            return err
-        }
-
-        delete(d.blobs, hash)
-        return nil
-    }
-    for _, layer := range d.schema.LayersDescriptors {
-        hash := layer.Digest.Hex()
-        if err = checkLayer(hash); err != nil {
-            return err
-        }
-    }
-    for _, layer := range d.schema.FSLayers {
-        hash := layer.BlobSum.Hex()
-        if err = checkLayer(hash); err != nil {
-            return err
-        }
-    }
-
-    // Import the other blobs that are not layers
-    for _, blob := range d.blobs {
-        err := d.importConfig(repo, blob)
-        if err != nil {
-            return err
-        }
-    }
-
-    manifestPath := filepath.Join(d.tmpDirPath, "manifest")
-
-    metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
-        fmt.Sprintf("signatures=%d", d.signaturesLen),
-        fmt.Sprintf("docker.digest=%s", string(d.digest))}
-    if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil {
-        return err
-    }
-
-    _, err = repo.CommitTransaction()
-    return err
-}
-
-func ensureDirectoryExists(path string) error {
-    if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
-        if err := os.MkdirAll(path, 0755); err != nil {
-            return err
-        }
-    }
-    return nil
-}
-
-func ensureParentDirectoryExists(path string) error {
-    return ensureDirectoryExists(filepath.Dir(path))
-}
diff --git a/vendor/github.com/containers/image/v4/ostree/ostree_src.go b/vendor/github.com/containers/image/v4/ostree/ostree_src.go
deleted file mode 100644
index ecb6e3f84..000000000
--- a/vendor/github.com/containers/image/v4/ostree/ostree_src.go
+++ /dev/null
@@ -1,416 +0,0 @@
-// +build containers_image_ostree
-
-package ostree
-
-import (
-    "bytes"
-    "context"
-    "encoding/base64"
-    "fmt"
-    "io"
-    "io/ioutil"
-    "strconv"
-    "strings"
-    "unsafe"
-
-    "github.com/containers/image/v4/manifest"
-    "github.com/containers/image/v4/types"
-    "github.com/containers/storage/pkg/ioutils"
-    "github.com/klauspost/pgzip"
-    digest "github.com/opencontainers/go-digest"
-    glib "github.com/ostreedev/ostree-go/pkg/glibobject"
-    "github.com/pkg/errors"
-    "github.com/vbatts/tar-split/tar/asm"
-    "github.com/vbatts/tar-split/tar/storage"
-)
-
-// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
-// #include <glib.h>
-// #include <glib-object.h>
-// #include <gio/gio.h>
-// #include <stdlib.h>
-// #include <ostree.h>
-// #include <gio/ginputstream.h>
-import "C"
-
-type ostreeImageSource struct {
-    ref    ostreeReference
-    tmpDir string
-    repo   *C.struct_OstreeRepo
-    // get the compressed layer by its uncompressed checksum
-    compressed map[digest.Digest]digest.Digest
-}
-
-// newImageSource returns an ImageSource for reading from an existing ostree repository.
-func newImageSource(tmpDir string, ref ostreeReference) (types.ImageSource, error) {
-    return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil
-}
-
-// Reference returns the reference used to set up this source.
-func (s *ostreeImageSource) Reference() types.ImageReference {
-    return s.ref
-}
-
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *ostreeImageSource) Close() error {
-    if s.repo != nil {
-        C.g_object_unref(C.gpointer(s.repo))
-    }
-    return nil
-}
-
-func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) {
-    var metadataKey string
-    if isCompressed {
-        metadataKey = "docker.uncompressed_size"
-    } else {
-        metadataKey = "docker.size"
-    }
-    b := fmt.Sprintf("ociimage/%s", blob)
-    found, data, err := readMetadata(s.repo, b, metadataKey)
-    if err != nil || !found {
-        return 0, err
-    }
-    return strconv.ParseInt(data, 10, 64)
-}
-
-func (s *ostreeImageSource) getLenSignatures() (int64, error) {
-    b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
-    found, data, err := readMetadata(s.repo, b, "signatures")
-    if err != nil {
-        return -1, err
-    }
-    if !found {
-        // if 'signatures' is not present, just return 0 signatures.
-        return 0, nil
-    }
-    return strconv.ParseInt(data, 10, 64)
-}
-
-func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) {
-    b := fmt.Sprintf("ociimage/%s", blob)
-    found, out, err := readMetadata(s.repo, b, "tarsplit.output")
-    if err != nil || !found {
-        return nil, err
-    }
-    return base64.StdEncoding.DecodeString(out)
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
-    if instanceDigest != nil {
-        return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`)
-    }
-    if s.repo == nil {
-        repo, err := openRepo(s.ref.repo)
-        if err != nil {
-            return nil, "", err
-        }
-        s.repo = repo
-    }
-
-    b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
-    found, out, err := readMetadata(s.repo, b, "docker.manifest")
-    if err != nil {
-        return nil, "", err
-    }
-    if !found {
-        return nil, "", errors.New("manifest not found")
-    }
-    m := []byte(out)
-    return m, manifest.GuessMIMEType(m), nil
-}
-
-func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
-    return nil, "", errors.New("manifest lists are not supported by this transport")
-}
-
-func openRepo(path string) (*C.struct_OstreeRepo, error) {
-    var cerr *C.GError
-    cpath := C.CString(path)
-    defer C.free(unsafe.Pointer(cpath))
-    pathc := C.g_file_new_for_path(cpath)
-    defer C.g_object_unref(C.gpointer(pathc))
-    repo := C.ostree_repo_new(pathc)
-    r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
-    if !r {
-        C.g_object_unref(C.gpointer(repo))
-        return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
-    }
-    return repo, nil
-}
-
-type ostreePathFileGetter struct {
-    repo       *C.struct_OstreeRepo
-    parentRoot *C.GFile
-}
-
-type ostreeReader struct {
-    stream *C.GFileInputStream
-}
-
-func (o ostreeReader) Close() error {
-    C.g_object_unref(C.gpointer(o.stream))
-    return nil
-}
-func (o ostreeReader) Read(p []byte) (int, error) {
-    var cerr *C.GError
-    instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type())
-    stream := (*C.GInputStream)(unsafe.Pointer(instanceCast))
-
-    b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr)
-    if b == nil {
-        return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
-    }
-    defer C.g_bytes_unref(b)
-
-    count := int(C.g_bytes_get_size(b))
-    if count == 0 {
-        return 0, io.EOF
-    }
-    data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count]
-    copy(p, data)
-    return count, nil
-}
-
-func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) {
-    var cerr *C.GError
-    var ref *C.char
-    defer C.free(unsafe.Pointer(ref))
-
-    cCommit := C.CString(commit)
-    defer C.free(unsafe.Pointer(cCommit))
-
-    if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) {
-        return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
-    }
-
-    if ref == nil {
-        return false, "", nil
-    }
-
-    var variant *C.GVariant
-    if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) {
-        return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
-    }
-    defer C.g_variant_unref(variant)
-    if variant != nil {
-        cKey := C.CString(key)
-        defer C.free(unsafe.Pointer(cKey))
-
-        metadata := C.g_variant_get_child_value(variant, 0)
-        defer C.g_variant_unref(metadata)
-
-        data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil)
-        if data != nil {
-            defer C.g_variant_unref(data)
-            ptr := (*C.char)(C.g_variant_get_string(data, nil))
-            val := C.GoString(ptr)
-            return true, val, nil
-        }
-    }
-    return false, "", nil
-}
-
-func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) {
-    var cerr *C.GError
-    var parentRoot *C.GFile
-    cCommit := C.CString(commit)
-    defer C.free(unsafe.Pointer(cCommit))
-    if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) {
-        return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
-    }
-
-    C.g_object_ref(C.gpointer(repo))
-
-    return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil
-}
-
-func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
-    var file *C.GFile
-    if strings.HasPrefix(filename, "./") {
-        filename = filename[2:]
-    }
-    cfilename := C.CString(filename)
-    defer C.free(unsafe.Pointer(cfilename))
-
-    file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename))
-
-    var cerr *C.GError
-    stream := C.g_file_read(file, nil, &cerr)
-    if stream == nil {
-        return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
-    }
-
-    return &ostreeReader{stream: stream}, nil
-}
-
-func (o ostreePathFileGetter) Close() {
-    C.g_object_unref(C.gpointer(o.repo))
-    C.g_object_unref(C.gpointer(o.parentRoot))
-}
-
-func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) {
-    getter, err := newOSTreePathFileGetter(s.repo, commit)
-    if err != nil {
-        return nil, err
-    }
-    defer getter.Close()
-
-    return getter.Get(path)
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
-    return false
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
-
-    blob := info.Digest.Hex()
-
-    // Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
-    if s.compressed == nil {
-        _, err := s.LayerInfosForCopy(ctx)
-        if err != nil {
-            return nil, -1, err
-        }
-
-    }
-    compressedBlob, isCompressed := s.compressed[info.Digest]
-    if isCompressed {
-        blob = compressedBlob.Hex()
-    }
-    branch := fmt.Sprintf("ociimage/%s", blob)
-
-    if s.repo == nil {
-        repo, err := openRepo(s.ref.repo)
-        if err != nil {
-            return nil, 0, err
-        }
-        s.repo = repo
-    }
-
-    layerSize, err := s.getBlobUncompressedSize(blob, isCompressed)
-    if err != nil {
-        return nil, 0, err
-    }
-
-    tarsplit, err := s.getTarSplitData(blob)
-    if err != nil {
-        return nil, 0, err
-    }
-
-    // if tarsplit is nil we are looking at the manifest; return the file in /content directly
-    if tarsplit == nil {
-        file, err := s.readSingleFile(branch, "/content")
-        if err != nil {
-            return nil, 0, err
-        }
-        return file, layerSize, nil
-    }
-
-    mf := bytes.NewReader(tarsplit)
-    mfz, err := pgzip.NewReader(mf)
-    if err != nil {
-        return nil, 0, err
-    }
-    metaUnpacker := storage.NewJSONUnpacker(mfz)
-
-    getter, err := newOSTreePathFileGetter(s.repo, branch)
-    if err != nil {
-        mfz.Close()
-        return nil, 0, err
-    }
-
-    ots := asm.NewOutputTarStream(getter, metaUnpacker)
-
-    rc := ioutils.NewReadCloserWrapper(ots, func() error {
-        getter.Close()
-        mfz.Close()
-        return ots.Close()
-    })
-    return rc, layerSize, nil
-}
-
-func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
-    if instanceDigest != nil {
-        return nil, errors.New("manifest lists are not supported by this transport")
-    }
-    lenSignatures, err := s.getLenSignatures()
-    if err != nil {
-        return nil, err
-    }
-    branch := fmt.Sprintf("ociimage/%s", s.ref.branchName)
-
-    if s.repo == nil {
-        repo, err := openRepo(s.ref.repo)
-        if err != nil {
-            return nil, err
-        }
-        s.repo = repo
-    }
-
-    signatures := [][]byte{}
-    for i := int64(1); i <= lenSignatures; i++ {
-        sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i))
-        if err != nil {
-            return nil, err
-        }
-        defer sigReader.Close()
-
-        sig, err := ioutil.ReadAll(sigReader)
-        if err != nil {
-            return nil, err
-        }
-        signatures = append(signatures, sig)
-    }
-    return signatures, nil
-}
-
-// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
-// the image, after they've been decompressed.
-func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
-    updatedBlobInfos := []types.BlobInfo{}
-    manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
-    if err != nil {
-        return nil, err
-    }
-
-    man, err := manifest.FromBlob(manifestBlob, manifestType)
-    if err != nil {
-        return nil, err
-    }
-
-    s.compressed = make(map[digest.Digest]digest.Digest)
-
-    layerBlobs := man.LayerInfos()
-
-    for _, layerBlob := range layerBlobs {
-        branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
-        found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
-        if err != nil || !found {
-            return nil, err
-        }
-
-        found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size")
-        if err != nil || !found {
-            return nil, err
-        }
-
-        uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64)
-        if err != nil {
-            return nil, err
-        }
-        uncompressedDigest := digest.Digest(uncompressedDigestStr)
-        blobInfo := types.BlobInfo{
-            Digest:    uncompressedDigest,
-            Size:      uncompressedSize,
-            MediaType: layerBlob.MediaType,
-        }
-        s.compressed[uncompressedDigest] = layerBlob.Digest
-        updatedBlobInfos = append(updatedBlobInfos, blobInfo)
-    }
-    return updatedBlobInfos, nil
-}
diff --git a/vendor/github.com/containers/image/v4/ostree/ostree_transport.go b/vendor/github.com/containers/image/v4/ostree/ostree_transport.go
deleted file mode 100644
index d720cb7ac..000000000
--- a/vendor/github.com/containers/image/v4/ostree/ostree_transport.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// +build containers_image_ostree
-
-package ostree
-
-import (
-    "bytes"
-    "context"
-    "fmt"
-    "os"
-    "path/filepath"
-    "regexp"
-    "strings"
-
-    "github.com/containers/image/v4/directory/explicitfilepath"
-    "github.com/containers/image/v4/docker/reference"
-    "github.com/containers/image/v4/image"
-    "github.com/containers/image/v4/transports"
-    "github.com/containers/image/v4/types"
-    "github.com/pkg/errors"
-)
-
-const defaultOSTreeRepo = "/ostree/repo"
-
-// Transport is an ImageTransport for ostree paths.
-var Transport = ostreeTransport{}
-
-type ostreeTransport struct{}
-
-func (t ostreeTransport) Name() string {
-    return "ostree"
-}
-
-func init() {
-    transports.Register(Transport)
-}
-
-// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
-// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
-// scope passed to this function will not be "", that value is always allowed.
-func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
-    sep := strings.Index(scope, ":")
-    if sep < 0 {
-        return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
-    }
-    repo := scope[:sep]
-
-    if !strings.HasPrefix(repo, "/") {
-        return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
-    }
-    cleaned := filepath.Clean(repo)
-    if cleaned != repo {
-        return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
-    }
-
-    // FIXME? In the namespaces within a repo,
-    // we could be verifying the various character set and length restrictions
-    // from docker/distribution/reference.regexp.go, but other than that there
-    // are few semantically invalid strings.
-    return nil
-}
-
-// ostreeReference is an ImageReference for ostree paths.
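(Aside: references for this transport take the form "image[:tag]@/absolute/repo/path", with the repo part defaulting to /ostree/repo. A minimal, self-contained sketch of the "@/" split rule used by ParseReference below; splitOSTreeRef is an illustrative name, not part of this package:)

    package main

    import (
        "fmt"
        "strings"
    )

    // splitOSTreeRef mirrors the "@/" split in ostreeTransport.ParseReference.
    func splitOSTreeRef(ref string) (image, repo string) {
        const defaultOSTreeRepo = "/ostree/repo"
        s := strings.SplitN(ref, "@/", 2)
        if len(s) == 1 {
            return s[0], defaultOSTreeRepo
        }
        return s[0], "/" + s[1]
    }

    func main() {
        fmt.Println(splitOSTreeRef("busybox"))                // busybox /ostree/repo
        fmt.Println(splitOSTreeRef("busybox:1.30@/tmp/repo")) // busybox:1.30 /tmp/repo
    }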
-type ostreeReference struct {
-    image      string
-    branchName string
-    repo       string
-}
-
-type ostreeImageCloser struct {
-    types.ImageCloser
-    size int64
-}
-
-func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
-    var repo = ""
-    var image = ""
-    s := strings.SplitN(ref, "@/", 2)
-    if len(s) == 1 {
-        image, repo = s[0], defaultOSTreeRepo
-    } else {
-        image, repo = s[0], "/"+s[1]
-    }
-
-    return NewReference(image, repo)
-}
-
-// NewReference returns an OSTree reference for a specified repo and image.
-func NewReference(image string, repo string) (types.ImageReference, error) {
-    // image is not _really_ in a containers/image/docker/reference format;
-    // as far as the libOSTree ociimage/* namespace is concerned, it is more or
-    // less an arbitrary string with an implied tag.
-    // Parse the image using reference.ParseNormalizedNamed so that we can
-    // check whether the image has a tag specified, and add ":latest" if needed
-    ostreeImage, err := reference.ParseNormalizedNamed(image)
-    if err != nil {
-        return nil, err
-    }
-
-    if reference.IsNameOnly(ostreeImage) {
-        image = image + ":latest"
-    }
-
-    resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
-    if err != nil {
-        // With os.IsNotExist(err), the parent directory of repo does not exist either;
-        // that should ordinarily not happen, but it would be a bit weird to reject
-        // references which do not specify a repo just because the implicit defaultOSTreeRepo
-        // does not exist.
-        if os.IsNotExist(err) && repo == defaultOSTreeRepo {
-            resolved = repo
-        } else {
-            return nil, err
-        }
-    }
-    // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
-    // from being ambiguous with values of PolicyConfigurationIdentity.
-    if strings.Contains(resolved, ":") {
-        return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
-    }
-
-    return ostreeReference{
-        image:      image,
-        branchName: encodeOStreeRef(image),
-        repo:       resolved,
-    }, nil
-}
-
-func (ref ostreeReference) Transport() types.ImageTransport {
-    return Transport
-}
-
-// StringWithinTransport returns a string representation of the reference, which MUST be such that
-// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
-// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
-// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
-func (ref ostreeReference) StringWithinTransport() string {
-    return fmt.Sprintf("%s@%s", ref.image, ref.repo)
-}
-
-// DockerReference returns a Docker reference associated with this reference
-// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
-// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
-func (ref ostreeReference) DockerReference() reference.Named {
-    return nil
-}
-
-func (ref ostreeReference) PolicyConfigurationIdentity() string {
-    return fmt.Sprintf("%s:%s", ref.repo, ref.image)
-}
-
-// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
-// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
-// in order, terminating on first match, and an implicit "" is always checked at the end.
-// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
-// and each following element to be a prefix of the element preceding it.
-func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
-    s := strings.SplitN(ref.image, ":", 2)
-    if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
-        panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
-    }
-    name := s[0]
-    res := []string{}
-    for {
-        res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))
-
-        lastSlash := strings.LastIndex(name, "/")
-        if lastSlash == -1 {
-            break
-        }
-        name = name[:lastSlash]
-    }
-    return res
-}
-
-func (s *ostreeImageCloser) Size() (int64, error) {
-    return s.size, nil
-}
-
-// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned ImageCloser.
-// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
-// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
-    var tmpDir string
-    if sys == nil || sys.OSTreeTmpDirPath == "" {
-        tmpDir = os.TempDir()
-    } else {
-        tmpDir = sys.OSTreeTmpDirPath
-    }
-    src, err := newImageSource(tmpDir, ref)
-    if err != nil {
-        return nil, err
-    }
-    return image.FromSource(ctx, sys, src)
-}
-
-// NewImageSource returns a types.ImageSource for this reference.
-// The caller must call .Close() on the returned ImageSource.
-func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
-    var tmpDir string
-    if sys == nil || sys.OSTreeTmpDirPath == "" {
-        tmpDir = os.TempDir()
-    } else {
-        tmpDir = sys.OSTreeTmpDirPath
-    }
-    return newImageSource(tmpDir, ref)
-}
-
-// NewImageDestination returns a types.ImageDestination for this reference.
-// The caller must call .Close() on the returned ImageDestination.
-func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
-    var tmpDir string
-    if sys == nil || sys.OSTreeTmpDirPath == "" {
-        tmpDir = os.TempDir()
-    } else {
-        tmpDir = sys.OSTreeTmpDirPath
-    }
-    return newImageDestination(ref, tmpDir)
-}
-
-// DeleteImage deletes the named image from the registry, if supported.
-func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
-    return errors.Errorf("Deleting images not implemented for ostree: images")
-}
-
-var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)
-
-func encodeOStreeRef(in string) string {
-    var buffer bytes.Buffer
-    for i := range in {
-        sub := in[i : i+1]
-        if ostreeRefRegexp.MatchString(sub) {
-            buffer.WriteString(sub)
-        } else {
-            buffer.WriteString(fmt.Sprintf("_%02X", sub[0]))
-        }
-
-    }
-    return buffer.String()
-}
-
-// manifestPath returns a path for the manifest within an ostree using our conventions.
-func (ref ostreeReference) manifestPath() string {
-    return filepath.Join("manifest", "manifest.json")
-}
-
-// signaturePath returns a path for a signature within an ostree using our conventions.
-func (ref ostreeReference) signaturePath(index int) string {
-    return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1))
-}
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v4/pkg/blobinfocache/boltdb/boltdb.go
deleted file mode 100644
index 85eb7d6f1..000000000
--- a/vendor/github.com/containers/image/v4/pkg/blobinfocache/boltdb/boltdb.go
+++ /dev/null
@@ -1,332 +0,0 @@
-// Package boltdb implements a BlobInfoCache backed by BoltDB.
-package boltdb
-
-import (
-    "fmt"
-    "os"
-    "sync"
-    "time"
-
-    "github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize"
-    "github.com/containers/image/v4/types"
-    bolt "github.com/etcd-io/bbolt"
-    "github.com/opencontainers/go-digest"
-    "github.com/sirupsen/logrus"
-)
-
-var (
-    // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
-    // we can simply start over with a different filename; update blobInfoCacheFilename.
-
-    // FIXME: For CRI-O, does this need to hide information between different users?
-
-    // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
-    uncompressedDigestBucket = []byte("uncompressedDigest")
-    // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
-    // (as a set of key=digest, value="" pairs)
-    digestByUncompressedBucket = []byte("digestByUncompressed")
-    // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
-    // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
-    knownLocationsBucket = []byte("knownLocations")
-)
-
-// Concurrency:
-// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
-// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
-
-// pathLock contains a lock for a specific BoltDB database path.
-type pathLock struct {
-    refCount int64      // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
-    mutex    sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
-}
-
-var (
-    // pathLocks contains a lock for each currently open file.
-    // This must be global so that independently created instances of boltDBCache exclude each other.
-    // The map is protected by pathLocksMutex.
-    // FIXME? Should this be based on device:inode numbers instead of paths?
-    pathLocks      = map[string]*pathLock{}
-    pathLocksMutex = sync.Mutex{}
-)
-
-// lockPath obtains the pathLock for path.
-// The caller must call unlockPath eventually.
-func lockPath(path string) {
-    pl := func() *pathLock { // A scope for defer
-        pathLocksMutex.Lock()
-        defer pathLocksMutex.Unlock()
-        pl, ok := pathLocks[path]
-        if ok {
-            pl.refCount++
-        } else {
-            pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
-            pathLocks[path] = pl
-        }
-        return pl
-    }()
-    pl.mutex.Lock()
-}
-
-// unlockPath releases the pathLock for path.
-func unlockPath(path string) {
-    pathLocksMutex.Lock()
-    defer pathLocksMutex.Unlock()
-    pl, ok := pathLocks[path]
-    if !ok {
-        // Should this return an error instead? BlobInfoCache ultimately ignores errors…
-        panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
-    }
-    pl.mutex.Unlock()
-    pl.refCount--
-    if pl.refCount == 0 {
-        delete(pathLocks, path)
-    }
-}
-
-// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
-//
-// Note that we don’t keep the database open across operations, because that would lock the file and block any other
-// users; instead, we need to open/close it for every single write or lookup.
-type cache struct {
-    path string
-}
-
-// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
-//
-// Most users should call blobinfocache.DefaultCache instead.
-func New(path string) types.BlobInfoCache {
-    return &cache{path: path}
-}
-
-// view runs the specified fn within a read-only transaction on the database.
-func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
-    // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
-    // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
-    // a read lock, blocking any future writes.
-    // Hence this preliminary check, which is RACY: Another process could remove the file
-    // between the Lstat call and opening the database.
-    if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
-        return err
-    }
-
-    lockPath(bdc.path)
-    defer unlockPath(bdc.path)
-    db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
-    if err != nil {
-        return err
-    }
-    defer func() {
-        if err := db.Close(); retErr == nil && err != nil {
-            retErr = err
-        }
-    }()
-
-    return db.View(fn)
-}
-
-// update runs the specified fn within a read-write transaction on the database.
-func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
-    lockPath(bdc.path)
-    defer unlockPath(bdc.path)
-    db, err := bolt.Open(bdc.path, 0600, nil)
-    if err != nil {
-        return err
-    }
-    defer func() {
-        if err := db.Close(); retErr == nil && err != nil {
-            retErr = err
-        }
-    }()
-
-    return db.Update(fn)
-}
-
-// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
-func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
-    if b := tx.Bucket(uncompressedDigestBucket); b != nil {
-        if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
-            d, err := digest.Parse(string(uncompressedBytes))
-            if err == nil {
-                return d
-            }
-            // FIXME? Log err (but throttle the log volume on repeated accesses)?
-        }
-    }
-    // Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
-    // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
-    // when we already record a (compressed, uncompressed) pair.
-    if b := tx.Bucket(digestByUncompressedBucket); b != nil {
-        if b = b.Bucket([]byte(anyDigest.String())); b != nil {
-            c := b.Cursor()
-            if k, _ := c.First(); k != nil { // The bucket is non-empty
-                return anyDigest
-            }
-        }
-    }
-    return ""
-}
-
-// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
-// May return anyDigest if it is known to be uncompressed.
-// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
-    var res digest.Digest
-    if err := bdc.view(func(tx *bolt.Tx) error {
-        res = bdc.uncompressedDigest(tx, anyDigest)
-        return nil
-    }); err != nil { // Including os.IsNotExist(err)
-        return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
-    }
-    return res
-}
-
-// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
-// It’s allowed for anyDigest == uncompressed.
-// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
-// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
-// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
-    _ = bdc.update(func(tx *bolt.Tx) error {
-        b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
-        if err != nil {
-            return err
-        }
-        key := []byte(anyDigest.String())
-        if previousBytes := b.Get(key); previousBytes != nil {
-            previous, err := digest.Parse(string(previousBytes))
-            if err != nil {
-                return err
-            }
-            if previous != uncompressed {
-                logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
-            }
-        }
-        if err := b.Put(key, []byte(uncompressed.String())); err != nil {
-            return err
-        }
-
-        b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
-        if err != nil {
-            return err
-        }
-        b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
-        if err != nil {
-            return err
-        }
-        if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
-            return err
-        }
-        return nil
-    }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
-}
-
-// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
-// and can be reused given the opaque location data.
-func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
-    _ = bdc.update(func(tx *bolt.Tx) error {
-        b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
-        if err != nil {
-            return err
-        }
-        b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
-        if err != nil {
-            return err
-        }
-        b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
-        if err != nil {
-            return err
-        }
-        b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
-        if err != nil {
-            return err
-        }
-        value, err := time.Now().MarshalBinary()
-        if err != nil {
-            return err
-        }
-        if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
-            return err
-        }
-        return nil
-    }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
-}
-
-// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
-func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
-    b := scopeBucket.Bucket([]byte(digest.String()))
-    if b == nil {
-        return candidates
-    }
-    _ = b.ForEach(func(k, v []byte) error {
-        t := time.Time{}
-        if err := t.UnmarshalBinary(v); err != nil {
-            return err
-        }
-        candidates = append(candidates, prioritize.CandidateWithTime{
-            Candidate: types.BICReplacementCandidate{
-                Digest:   digest,
-                Location: types.BICLocationReference{Opaque: string(k)},
-            },
-            LastSeen: t,
-        })
-        return nil
-    }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
-    return candidates
-}
-
-// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
-// within the specified (transport scope) (if they still exist, which is not guaranteed).
-//
-// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
-// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
-// uncompressed digest.
-func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
-    res := []prioritize.CandidateWithTime{}
-    var uncompressedDigestValue digest.Digest // = ""
-    if err := bdc.view(func(tx *bolt.Tx) error {
-        scopeBucket := tx.Bucket(knownLocationsBucket)
-        if scopeBucket == nil {
-            return nil
-        }
-        scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
-        if scopeBucket == nil {
-            return nil
-        }
-        scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
-        if scopeBucket == nil {
-            return nil
-        }
-
-        res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
-        if canSubstitute {
-            if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
-                b := tx.Bucket(digestByUncompressedBucket)
-                if b != nil {
-                    b = b.Bucket([]byte(uncompressedDigestValue.String()))
-                    if b != nil {
-                        if err := b.ForEach(func(k, _ []byte) error {
-                            d, err := digest.Parse(string(k))
-                            if err != nil {
-                                return err
-                            }
-                            if d != primaryDigest && d != uncompressedDigestValue {
-                                res = bdc.appendReplacementCandidates(res, scopeBucket, d)
-                            }
-                            return nil
-                        }); err != nil {
-                            return err
-                        }
-                    }
-                }
-                if uncompressedDigestValue != primaryDigest {
-                    res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
-                }
-            }
-        }
-        return nil
-    }); err != nil { // Including os.IsNotExist(err)
-        return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
-    }
-
-    return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
-}
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v4/pkg/blobinfocache/default.go
deleted file mode 100644
index af136c36d..000000000
--- a/vendor/github.com/containers/image/v4/pkg/blobinfocache/default.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package blobinfocache
-
-import (
-    "fmt"
-    "os"
-    "path/filepath"
-    "strconv"
-
-    "github.com/containers/image/v4/pkg/blobinfocache/boltdb"
-    "github.com/containers/image/v4/pkg/blobinfocache/memory"
-    "github.com/containers/image/v4/types"
-    "github.com/sirupsen/logrus"
-)
-
-const (
-    // blobInfoCacheFilename is the file name used for blob info caches.
-    // If the format changes in an incompatible way, increase the version number.
-    blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
-    // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
-    systemBlobInfoCacheDir = "/var/lib/containers/cache"
-)
-
-// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
-// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory.
-func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
-    if sys != nil && sys.BlobInfoCacheDir != "" {
-        return sys.BlobInfoCacheDir, nil
-    }
-
-    // FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
-    // and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
-    if euid == 0 {
-        if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
-            return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
-        }
-        return systemBlobInfoCacheDir, nil
-    }
-
-    // This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
-    dataDir := os.Getenv("XDG_DATA_HOME")
-    if dataDir == "" {
-        home := os.Getenv("HOME")
-        if home == "" {
-            return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
-        }
-        dataDir = filepath.Join(home, ".local", "share")
-    }
-    return filepath.Join(dataDir, "containers", "cache"), nil
-}
-
-func getRootlessUID() int {
-    uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
-    if uidEnv != "" {
-        u, _ := strconv.Atoi(uidEnv)
-        return u
-    }
-    return os.Geteuid()
-}
-
-// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
-func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
-    dir, err := blobInfoCacheDir(sys, getRootlessUID())
-    if err != nil {
-        logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
-        return memory.New()
-    }
-    path := filepath.Join(dir, blobInfoCacheFilename)
-    if err := os.MkdirAll(dir, 0700); err != nil {
-        logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
-        return memory.New()
-    }
-
-    logrus.Debugf("Using blob info cache at %s", path)
-    return boltdb.New(path)
-}
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize/prioritize.go
deleted file mode 100644
index 7820119b0..000000000
--- a/vendor/github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize/prioritize.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Package prioritize provides utilities for prioritizing locations in
-// types.BlobInfoCache.CandidateLocations.
-package prioritize
-
-import (
-    "sort"
-    "time"
-
-    "github.com/containers/image/v4/types"
-    "github.com/opencontainers/go-digest"
-)
-
-// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
-// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
-// This is a heuristic/guess, and could well use a different value.
-const replacementAttempts = 5
-
-// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
-type CandidateWithTime struct {
-    Candidate types.BICReplacementCandidate // The replacement candidate
-    LastSeen  time.Time                     // Time the candidate was last known to exist (either read or written)
-}
-
-// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
-// along with the specially-treated digest values for the implementation of sort.Interface.Less
-type candidateSortState struct {
-    cs                 []CandidateWithTime // The entries to sort
-    primaryDigest      digest.Digest       // The digest the user actually asked for
-    uncompressedDigest digest.Digest       // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
-}
-
-func (css *candidateSortState) Len() int {
-    return len(css.cs)
-}
-
-func (css *candidateSortState) Less(i, j int) bool {
-    xi := css.cs[i]
-    xj := css.cs[j]
-
-    // primaryDigest entries come first, more recent first.
-    // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
-    // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
-
-    // First, deal with the primaryDigest/uncompressedDigest cases:
-    if xi.Candidate.Digest != xj.Candidate.Digest {
-        // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
-        if xi.Candidate.Digest == css.primaryDigest {
-            return true
-        }
-        if xj.Candidate.Digest == css.primaryDigest {
-            return false
-        }
-        if css.uncompressedDigest != "" {
-            if xi.Candidate.Digest == css.uncompressedDigest {
-                return false
-            }
-            if xj.Candidate.Digest == css.uncompressedDigest {
-                return true
-            }
-        }
-    } else { // xi.Candidate.Digest == xj.Candidate.Digest
-        // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
-        if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
-            return xi.LastSeen.After(xj.LastSeen)
-        }
-    }
-
-    // Neither of the digests are primaryDigest/uncompressedDigest:
-    if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
-        return xi.LastSeen.After(xj.LastSeen)
-    }
-    // Fall back to digest, if timestamps end up _exactly_ the same (how?!)
-    return xi.Candidate.Digest < xj.Candidate.Digest
-}
-
-func (css *candidateSortState) Swap(i, j int) {
-    css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
-}
-
-// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
-// number of entries to limit, only to make testing simpler.
-func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
-    // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
-    // compare equal.
-    sort.Sort(&candidateSortState{
-        cs:                 cs,
-        primaryDigest:      primaryDigest,
-        uncompressedDigest: uncompressedDigest,
-    })
-
-    resLength := len(cs)
-    if resLength > maxCandidates {
-        resLength = maxCandidates
-    }
-    res := make([]types.BICReplacementCandidate, resLength)
-    for i := range res {
-        res[i] = cs[i].Candidate
-    }
-    return res
-}
-
-// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
-// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
-// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
-//
-// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
-// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
-func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
-    return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
-}
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v4/pkg/blobinfocache/memory/memory.go
deleted file mode 100644
index c51b9f5ce..000000000
--- a/vendor/github.com/containers/image/v4/pkg/blobinfocache/memory/memory.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Package memory implements an in-memory BlobInfoCache.
-package memory
-
-import (
-    "sync"
-    "time"
-
-    "github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize"
-    "github.com/containers/image/v4/types"
-    digest "github.com/opencontainers/go-digest"
-    "github.com/sirupsen/logrus"
-)
-
-// locationKey only exists to make lookup in knownLocations easier.
-type locationKey struct {
-    transport  string
-    scope      types.BICTransportScope
-    blobDigest digest.Digest
-}
-
-// cache implements an in-memory-only BlobInfoCache
-type cache struct {
-    mutex sync.Mutex
-    // The following fields can only be accessed with mutex held.
-    uncompressedDigests   map[digest.Digest]digest.Digest
-    digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{}             // stores a set of digests for each uncompressed digest
-    knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
-}
-
-// New returns a BlobInfoCache implementation which is in-memory only.
-//
-// This is primarily intended for tests, but also used as a fallback
-// if blobinfocache.DefaultCache can’t determine, or set up, the
-// location for a persistent cache. Most users should use
-// blobinfocache.DefaultCache instead of calling this directly.
-// Manual users of types.{ImageSource,ImageDestination} might also use
-// this instead of a persistent cache.
-func New() types.BlobInfoCache {
-    return &cache{
-        uncompressedDigests:   map[digest.Digest]digest.Digest{},
-        digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
-        knownLocations:        map[locationKey]map[types.BICLocationReference]time.Time{},
-    }
-}
-
-// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
-// May return anyDigest if it is known to be uncompressed.
-// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). -func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { - mem.mutex.Lock() - defer mem.mutex.Unlock() - return mem.uncompressedDigestLocked(anyDigest) -} - -// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held. -func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest { - if d, ok := mem.uncompressedDigests[anyDigest]; ok { - return d - } - // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest. - // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings - // when we already record a (compressed, uncompressed) pair. - if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 { - return anyDigest - } - return "" -} - -// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. -// It’s allowed for anyDigest == uncompressed. -// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. -// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. -// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) -func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { - mem.mutex.Lock() - defer mem.mutex.Unlock() - if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed { - logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) - } - mem.uncompressedDigests[anyDigest] = uncompressed - - anyDigestSet, ok := mem.digestsByUncompressed[uncompressed] - if !ok { - anyDigestSet = map[digest.Digest]struct{}{} - mem.digestsByUncompressed[uncompressed] = anyDigestSet - } - anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again. -} - -// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, -// and can be reused given the opaque location data. -func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { - mem.mutex.Lock() - defer mem.mutex.Unlock() - key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest} - locationScope, ok := mem.knownLocations[key] - if !ok { - locationScope = map[types.BICLocationReference]time.Time{} - mem.knownLocations[key] = locationScope - } - locationScope[location] = time.Now() // Possibly overwriting an older entry. -} - -// appendReplacementCandiates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. 
-func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime {
-	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
-	for l, t := range locations {
-		candidates = append(candidates, prioritize.CandidateWithTime{
-			Candidate: types.BICReplacementCandidate{
-				Digest:   digest,
-				Location: l,
-			},
-			LastSeen: t,
-		})
-	}
-	return candidates
-}
-
-// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
-// within the specified (transport, scope) (if they still exist, which is not guaranteed).
-//
-// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
-// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
-// uncompressed digest.
-func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
-	mem.mutex.Lock()
-	defer mem.mutex.Unlock()
-	res := []prioritize.CandidateWithTime{}
-	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
-	var uncompressedDigest digest.Digest // = ""
-	if canSubstitute {
-		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
-			otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
-			for d := range otherDigests {
-				if d != primaryDigest && d != uncompressedDigest {
-					res = mem.appendReplacementCandidates(res, transport, scope, d)
-				}
-			}
-			if uncompressedDigest != primaryDigest {
-				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
-			}
-		}
-	}
-	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
-}
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v4/pkg/blobinfocache/none/none.go
deleted file mode 100644
index c5ce29a45..000000000
--- a/vendor/github.com/containers/image/v4/pkg/blobinfocache/none/none.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Package none implements a dummy BlobInfoCache which records no data.
-package none
-
-import (
-	"github.com/containers/image/v4/types"
-	"github.com/opencontainers/go-digest"
-)
-
-// noCache implements a dummy BlobInfoCache which records no data.
-type noCache struct {
-}
-
-// NoCache implements BlobInfoCache by not recording any data.
-//
-// This exists primarily for implementations of configGetter for
-// Manifest.Inspect, because configs only have one representation.
-// Any use of BlobInfoCache with blobs should usually use at least a
-// short-lived cache, ideally blobinfocache.DefaultCache.
-var NoCache types.BlobInfoCache = noCache{}
-
-// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
-// May return anyDigest if it is known to be uncompressed.
-// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
-	return ""
-}
-
-// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
-// It’s allowed for anyDigest == uncompressed.
-// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
-// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
-// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
-}
-
-// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
-// and can be reused given the opaque location data.
-func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
-}
-
-// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
-// within the specified (transport, scope) (if they still exist, which is not guaranteed).
-//
-// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
-// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
-// uncompressed digest.
-func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
-	return nil
-}
diff --git a/vendor/github.com/containers/image/v4/pkg/compression/compression.go b/vendor/github.com/containers/image/v4/pkg/compression/compression.go
deleted file mode 100644
index fd2f21549..000000000
--- a/vendor/github.com/containers/image/v4/pkg/compression/compression.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package compression
-
-import (
-	"bytes"
-	"compress/bzip2"
-	"fmt"
-	"io"
-	"io/ioutil"
-
-	"github.com/containers/image/v4/pkg/compression/internal"
-	"github.com/containers/image/v4/pkg/compression/types"
-	"github.com/klauspost/pgzip"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	"github.com/ulikunitz/xz"
-)
-
-// Algorithm is a compression algorithm that can be used for CompressStream.
-type Algorithm = types.Algorithm
-
-var (
-	// Gzip compression.
-	Gzip = internal.NewAlgorithm("gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
-	// Bzip2 compression.
-	Bzip2 = internal.NewAlgorithm("bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
-	// Xz compression.
-	Xz = internal.NewAlgorithm("Xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
-	// Zstd compression.
-	Zstd = internal.NewAlgorithm("zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
-
-	compressionAlgorithms = map[string]Algorithm{
-		Gzip.Name():  Gzip,
-		Bzip2.Name(): Bzip2,
-		Xz.Name():    Xz,
-		Zstd.Name():  Zstd,
-	}
-)
-
-// AlgorithmByName returns the Algorithm registered under the given name.
-func AlgorithmByName(name string) (Algorithm, error) {
-	algorithm, ok := compressionAlgorithms[name]
-	if ok {
-		return algorithm, nil
-	}
-	return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name)
-}
-
-// DecompressorFunc returns the decompressed stream, given a compressed stream.
-// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
-type DecompressorFunc = internal.DecompressorFunc
-
-// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
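A short usage sketch of the compression API being removed here, assuming nothing beyond the AlgorithmByName and CompressStream functions shown in this file; the payload is illustrative.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/containers/image/v4/pkg/compression"
)

func main() {
	algo, err := compression.AlgorithmByName("gzip")
	if err != nil {
		log.Fatal(err)
	}

	// Compress into an in-memory buffer; a nil level selects the default.
	var buf bytes.Buffer
	w, err := compression.CompressStream(&buf, algo, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.WriteString(w, "hello"); err != nil {
		log.Fatal(err)
	}
	w.Close() // the compressed stream must always be closed

	fmt.Printf("%d compressed bytes\n", buf.Len())
}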
-func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { - return pgzip.NewReader(r) -} - -// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. -func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) { - return ioutil.NopCloser(bzip2.NewReader(r)), nil -} - -// XzDecompressor is a DecompressorFunc for the xz compression algorithm. -func XzDecompressor(r io.Reader) (io.ReadCloser, error) { - r, err := xz.NewReader(r) - if err != nil { - return nil, err - } - return ioutil.NopCloser(r), nil -} - -// gzipCompressor is a CompressorFunc for the gzip compression algorithm. -func gzipCompressor(r io.Writer, level *int) (io.WriteCloser, error) { - if level != nil { - return pgzip.NewWriterLevel(r, *level) - } - return pgzip.NewWriter(r), nil -} - -// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm. -func bzip2Compressor(r io.Writer, level *int) (io.WriteCloser, error) { - return nil, fmt.Errorf("bzip2 compression not supported") -} - -// xzCompressor is a CompressorFunc for the xz compression algorithm. -func xzCompressor(r io.Writer, level *int) (io.WriteCloser, error) { - return xz.NewWriter(r) -} - -// CompressStream returns the compressor by its name -func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) { - return internal.AlgorithmCompressor(algo)(dest, level) -} - -// DetectCompressionFormat returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. -// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. -func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) { - buffer := [8]byte{} - - n, err := io.ReadAtLeast(input, buffer[:], len(buffer)) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again. - // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later. - return Algorithm{}, nil, nil, err - } - - var retAlgo Algorithm - var decompressor DecompressorFunc - for _, algo := range compressionAlgorithms { - if bytes.HasPrefix(buffer[:n], internal.AlgorithmPrefix(algo)) { - logrus.Debugf("Detected compression format %s", algo.Name()) - retAlgo = algo - decompressor = internal.AlgorithmDecompressor(algo) - break - } - } - if decompressor == nil { - logrus.Debugf("No compression detected") - } - - return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil -} - -// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. -// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. -func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { - _, d, r, e := DetectCompressionFormat(input) - return d, r, e -} - -// AutoDecompress takes a stream and returns an uncompressed version of the -// same stream. -// The caller must call Close() on the returned stream (even if the input does not need, -// or does not even support, closing!). 
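The detection helpers above consume the head of the input stream, so callers must continue reading from the returned reader rather than the original one. A sketch demonstrating this, using a hand-written gzip magic-byte prefix as the input:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/containers/image/v4/pkg/compression"
)

func main() {
	// Gzip magic bytes padded to the 8-byte detection window; detection only.
	input := bytes.NewReader([]byte{0x1F, 0x8B, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00})

	algo, decompressor, reader, err := compression.DetectCompressionFormat(input)
	if err != nil {
		log.Fatal(err)
	}
	if decompressor != nil {
		fmt.Println("detected:", algo.Name())
	}

	// Crucially, keep reading from `reader`, not `input`: detection already
	// consumed the first bytes and `reader` re-prepends them.
	rest, _ := ioutil.ReadAll(reader)
	fmt.Println(len(rest), "bytes available for decompression")
}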
-func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { - decompressor, stream, err := DetectCompression(stream) - if err != nil { - return nil, false, errors.Wrapf(err, "Error detecting compression") - } - var res io.ReadCloser - if decompressor != nil { - res, err = decompressor(stream) - if err != nil { - return nil, false, errors.Wrapf(err, "Error initializing decompression") - } - } else { - res = ioutil.NopCloser(stream) - } - return res, decompressor != nil, nil -} diff --git a/vendor/github.com/containers/image/v4/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v4/pkg/compression/internal/types.go deleted file mode 100644 index 6092a9517..000000000 --- a/vendor/github.com/containers/image/v4/pkg/compression/internal/types.go +++ /dev/null @@ -1,57 +0,0 @@ -package internal - -import "io" - -// CompressorFunc writes the compressed stream to the given writer using the specified compression level. -// The caller must call Close() on the stream (even if the input stream does not need closing!). -type CompressorFunc func(io.Writer, *int) (io.WriteCloser, error) - -// DecompressorFunc returns the decompressed stream, given a compressed stream. -// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). -type DecompressorFunc func(io.Reader) (io.ReadCloser, error) - -// Algorithm is a compression algorithm that can be used for CompressStream. -type Algorithm struct { - name string - prefix []byte - decompressor DecompressorFunc - compressor CompressorFunc -} - -// NewAlgorithm creates an Algorithm instance. -// This function exists so that Algorithm instances can only be created by code that -// is allowed to import this internal subpackage. -func NewAlgorithm(name string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { - return Algorithm{ - name: name, - prefix: prefix, - decompressor: decompressor, - compressor: compressor, - } -} - -// Name returns the name for the compression algorithm. -func (c Algorithm) Name() string { - return c.name -} - -// AlgorithmCompressor returns the compressor field of algo. -// This is a function instead of a public method so that it is only callable from by code -// that is allowed to import this internal subpackage. -func AlgorithmCompressor(algo Algorithm) CompressorFunc { - return algo.compressor -} - -// AlgorithmDecompressor returns the decompressor field of algo. -// This is a function instead of a public method so that it is only callable from by code -// that is allowed to import this internal subpackage. -func AlgorithmDecompressor(algo Algorithm) DecompressorFunc { - return algo.decompressor -} - -// AlgorithmPrefix returns the prefix field of algo. -// This is a function instead of a public method so that it is only callable from by code -// that is allowed to import this internal subpackage. -func AlgorithmPrefix(algo Algorithm) []byte { - return algo.prefix -} diff --git a/vendor/github.com/containers/image/v4/pkg/compression/types/types.go b/vendor/github.com/containers/image/v4/pkg/compression/types/types.go deleted file mode 100644 index ea43dc8cd..000000000 --- a/vendor/github.com/containers/image/v4/pkg/compression/types/types.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -import ( - "github.com/containers/image/v4/pkg/compression/internal" -) - -// DecompressorFunc returns the decompressed stream, given a compressed stream. 
-// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). -type DecompressorFunc = internal.DecompressorFunc - -// Algorithm is a compression algorithm provided and supported by pkg/compression. -// It can’t be supplied from the outside. -type Algorithm = internal.Algorithm diff --git a/vendor/github.com/containers/image/v4/pkg/compression/zstd.go b/vendor/github.com/containers/image/v4/pkg/compression/zstd.go deleted file mode 100644 index 962fe9676..000000000 --- a/vendor/github.com/containers/image/v4/pkg/compression/zstd.go +++ /dev/null @@ -1,59 +0,0 @@ -package compression - -import ( - "io" - - "github.com/klauspost/compress/zstd" -) - -type wrapperZstdDecoder struct { - decoder *zstd.Decoder -} - -func (w *wrapperZstdDecoder) Close() error { - w.decoder.Close() - return nil -} - -func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { - return w.decoder.DecodeAll(input, dst) -} - -func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { - return w.decoder.Read(p) -} - -func (w *wrapperZstdDecoder) Reset(r io.Reader) error { - return w.decoder.Reset(r) -} - -func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { - return w.decoder.WriteTo(wr) -} - -func zstdReader(buf io.Reader) (io.ReadCloser, error) { - decoder, err := zstd.NewReader(buf) - return &wrapperZstdDecoder{decoder: decoder}, err -} - -func zstdWriter(dest io.Writer) (io.WriteCloser, error) { - return zstd.NewWriter(dest) -} - -func zstdWriterWithLevel(dest io.Writer, level int) (io.WriteCloser, error) { - el := zstd.EncoderLevelFromZstd(level) - return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) -} - -// zstdCompressor is a CompressorFunc for the zstd compression algorithm. -func zstdCompressor(r io.Writer, level *int) (io.WriteCloser, error) { - if level == nil { - return zstdWriter(r) - } - return zstdWriterWithLevel(r, *level) -} - -// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm. 
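A sketch of how the *int level parameter flows through CompressStream into the zstd writer defined below; the level value is a placeholder, and the wrapper type above exists only to adapt zstd.Decoder, whose Close() returns no error, to io.ReadCloser.

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/containers/image/v4/pkg/compression"
)

func main() {
	var buf bytes.Buffer
	level := 11 // placeholder level; mapped via zstd.EncoderLevelFromZstd internally
	w, err := compression.CompressStream(&buf, compression.Zstd, &level)
	if err != nil {
		log.Fatal(err)
	}
	io.WriteString(w, "zstd payload")
	w.Close()

	r, err := compression.ZstdDecompressor(&buf)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close() // safe thanks to the wrapperZstdDecoder adapter
	out, _ := ioutil.ReadAll(r)
	fmt.Printf("%q\n", out)
}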
-func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) { - return zstdReader(r) -} diff --git a/vendor/github.com/containers/image/v4/pkg/docker/config/config.go b/vendor/github.com/containers/image/v4/pkg/docker/config/config.go deleted file mode 100644 index e720dc865..000000000 --- a/vendor/github.com/containers/image/v4/pkg/docker/config/config.go +++ /dev/null @@ -1,352 +0,0 @@ -package config - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/containers/image/v4/types" - helperclient "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" - "github.com/docker/docker/pkg/homedir" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type dockerAuthConfig struct { - Auth string `json:"auth,omitempty"` -} - -type dockerConfigFile struct { - AuthConfigs map[string]dockerAuthConfig `json:"auths"` - CredHelpers map[string]string `json:"credHelpers,omitempty"` -} - -var ( - defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json") - xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json") - dockerHomePath = filepath.FromSlash(".docker/config.json") - dockerLegacyHomePath = ".dockercfg" - - enableKeyring = false - - // ErrNotLoggedIn is returned for users not logged into a registry - // that they are trying to logout of - ErrNotLoggedIn = errors.New("not logged in") - // ErrNotSupported is returned for unsupported methods - ErrNotSupported = errors.New("not supported") -) - -// SetAuthentication stores the username and password in the auth.json file -func SetAuthentication(sys *types.SystemContext, registry, username, password string) error { - return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { - if ch, exists := auths.CredHelpers[registry]; exists { - return false, setAuthToCredHelper(ch, registry, username, password) - } - - // Set the credentials to kernel keyring if enableKeyring is true. - // The keyring might not work in all environments (e.g., missing capability) and isn't supported on all platforms. - // Hence, we want to fall-back to using the authfile in case the keyring failed. - // However, if the enableKeyring is false, we want adhere to the user specification and not use the keyring. - if enableKeyring { - err := setAuthToKernelKeyring(registry, username, password) - if err == nil { - logrus.Debugf("credentials for (%s, %s) were stored in the kernel keyring\n", registry, username) - return false, nil - } - logrus.Debugf("failed to authenticate with the kernel keyring, falling back to authfiles. 
%v", err) - } - creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) - newCreds := dockerAuthConfig{Auth: creds} - auths.AuthConfigs[registry] = newCreds - return true, nil - }) -} - -// GetAuthentication returns the registry credentials stored in -// either auth.json file or .docker/config.json -// If an entry is not found empty strings are returned for the username and password -func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) { - if sys != nil && sys.DockerAuthConfig != nil { - logrus.Debug("Returning credentials from DockerAuthConfig") - return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil - } - - if enableKeyring { - username, password, err := getAuthFromKernelKeyring(registry) - if err == nil { - logrus.Debug("returning credentials from kernel keyring") - return username, password, nil - } - } - - dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath) - var paths []string - pathToAuth, err := getPathToAuth(sys) - if err == nil { - paths = append(paths, pathToAuth) - } else { - // Error means that the path set for XDG_RUNTIME_DIR does not exist - // but we don't want to completely fail in the case that the user is pulling a public image - // Logging the error as a warning instead and moving on to pulling the image - logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err) - } - paths = append(paths, filepath.Join(homedir.Get(), dockerHomePath), dockerLegacyPath) - - for _, path := range paths { - legacyFormat := path == dockerLegacyPath - username, password, err := findAuthentication(registry, path, legacyFormat) - if err != nil { - logrus.Debugf("Credentials not found") - return "", "", err - } - if username != "" && password != "" { - logrus.Debugf("Returning credentials from %s", path) - return username, password, nil - } - } - logrus.Debugf("Credentials not found") - return "", "", nil -} - -// RemoveAuthentication deletes the credentials stored in auth.json -func RemoveAuthentication(sys *types.SystemContext, registry string) error { - return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { - // First try cred helpers. 
- if ch, exists := auths.CredHelpers[registry]; exists { - return false, deleteAuthFromCredHelper(ch, registry) - } - - // Next if keyring is enabled try kernel keyring - if enableKeyring { - err := deleteAuthFromKernelKeyring(registry) - if err == nil { - logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry) - return false, nil - } - logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles") - } - - if _, ok := auths.AuthConfigs[registry]; ok { - delete(auths.AuthConfigs, registry) - } else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok { - delete(auths.AuthConfigs, normalizeRegistry(registry)) - } else { - return false, ErrNotLoggedIn - } - return true, nil - }) -} - -// RemoveAllAuthentication deletes all the credentials stored in auth.json and kernel keyring -func RemoveAllAuthentication(sys *types.SystemContext) error { - return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { - if enableKeyring { - err := removeAllAuthFromKernelKeyring() - if err == nil { - logrus.Debugf("removing all credentials from kernel keyring") - return false, nil - } - logrus.Debugf("error removing credentials from kernel keyring") - } - auths.CredHelpers = make(map[string]string) - auths.AuthConfigs = make(map[string]dockerAuthConfig) - return true, nil - }) -} - -// getPath gets the path of the auth.json file -// The path can be overriden by the user if the overwrite-path flag is set -// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers -// Otherwise, the auth.json file is stored in /run/containers/UID -func getPathToAuth(sys *types.SystemContext) (string, error) { - if sys != nil { - if sys.AuthFilePath != "" { - return sys.AuthFilePath, nil - } - if sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), nil - } - } - - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - if runtimeDir != "" { - // This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway. - // We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case. - _, err := os.Stat(runtimeDir) - if os.IsNotExist(err) { - // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory - // or made a typo while setting the environment variable, - // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. - return "", errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir) - } // else ignore err and let the caller fail accessing xdgRuntimeDirPath. 
- return filepath.Join(runtimeDir, xdgRuntimeDirPath), nil - } - return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), nil -} - -// readJSONFile unmarshals the authentications stored in the auth.json file and returns it -// or returns an empty dockerConfigFile data structure if auth.json does not exist -// if the file exists and is empty, readJSONFile returns an error -func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { - var auths dockerConfigFile - - raw, err := ioutil.ReadFile(path) - if err != nil { - if os.IsNotExist(err) { - auths.AuthConfigs = map[string]dockerAuthConfig{} - return auths, nil - } - return dockerConfigFile{}, err - } - - if legacyFormat { - if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil { - return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path) - } - return auths, nil - } - - if err = json.Unmarshal(raw, &auths); err != nil { - return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path) - } - - return auths, nil -} - -// modifyJSON writes to auth.json if the dockerConfigFile has been updated -func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error { - path, err := getPathToAuth(sys) - if err != nil { - return err - } - - dir := filepath.Dir(path) - if _, err := os.Stat(dir); os.IsNotExist(err) { - if err = os.MkdirAll(dir, 0700); err != nil { - return errors.Wrapf(err, "error creating directory %q", dir) - } - } - - auths, err := readJSONFile(path, false) - if err != nil { - return errors.Wrapf(err, "error reading JSON file %q", path) - } - - updated, err := editor(&auths) - if err != nil { - return errors.Wrapf(err, "error updating %q", path) - } - if updated { - newData, err := json.MarshalIndent(auths, "", "\t") - if err != nil { - return errors.Wrapf(err, "error marshaling JSON %q", path) - } - - if err = ioutil.WriteFile(path, newData, 0755); err != nil { - return errors.Wrapf(err, "error writing to file %q", path) - } - } - - return nil -} - -func getAuthFromCredHelper(credHelper, registry string) (string, string, error) { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - creds, err := helperclient.Get(p, registry) - if err != nil { - return "", "", err - } - return creds.Username, creds.Secret, nil -} - -func setAuthToCredHelper(credHelper, registry, username, password string) error { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - creds := &credentials.Credentials{ - ServerURL: registry, - Username: username, - Secret: password, - } - return helperclient.Store(p, creds) -} - -func deleteAuthFromCredHelper(credHelper, registry string) error { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - return helperclient.Erase(p, registry) -} - -// findAuthentication looks for auth of registry in path -func findAuthentication(registry, path string, legacyFormat bool) (string, string, error) { - auths, err := readJSONFile(path, legacyFormat) - if err != nil { - return "", "", errors.Wrapf(err, "error reading JSON file %q", path) - } - - // First try cred helpers. They should always be normalized. 
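The helper wrappers above shell out to a docker-credential-* binary following the Docker credential-helpers protocol. A standalone sketch of the same calls; the "desktop" helper name and the registry URL are placeholders, and the matching docker-credential-desktop binary must exist on $PATH for this to run.

package main

import (
	"fmt"
	"log"

	helperclient "github.com/docker/docker-credential-helpers/client"
	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	// Hypothetical helper; mirrors setAuthToCredHelper/getAuthFromCredHelper above.
	p := helperclient.NewShellProgramFunc("docker-credential-desktop")

	if err := helperclient.Store(p, &credentials.Credentials{
		ServerURL: "registry.example.com",
		Username:  "user",
		Secret:    "secret",
	}); err != nil {
		log.Fatal(err)
	}

	creds, err := helperclient.Get(p, "registry.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(creds.Username)
}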
- if ch, exists := auths.CredHelpers[registry]; exists { - return getAuthFromCredHelper(ch, registry) - } - - // I'm feeling lucky - if val, exists := auths.AuthConfigs[registry]; exists { - return decodeDockerAuth(val.Auth) - } - - // bad luck; let's normalize the entries first - registry = normalizeRegistry(registry) - normalizedAuths := map[string]dockerAuthConfig{} - for k, v := range auths.AuthConfigs { - normalizedAuths[normalizeRegistry(k)] = v - } - if val, exists := normalizedAuths[registry]; exists { - return decodeDockerAuth(val.Auth) - } - return "", "", nil -} - -func decodeDockerAuth(s string) (string, string, error) { - decoded, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return "", "", err - } - parts := strings.SplitN(string(decoded), ":", 2) - if len(parts) != 2 { - // if it's invalid just skip, as docker does - return "", "", nil - } - user := parts[0] - password := strings.Trim(parts[1], "\x00") - return user, password, nil -} - -// convertToHostname converts a registry url which has http|https prepended -// to just an hostname. -// Copied from github.com/docker/docker/registry/auth.go -func convertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} - -func normalizeRegistry(registry string) string { - normalized := convertToHostname(registry) - switch normalized { - case "registry-1.docker.io", "docker.io": - return "index.docker.io" - } - return normalized -} diff --git a/vendor/github.com/containers/image/v4/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v4/pkg/docker/config/config_linux.go deleted file mode 100644 index 0cd73528b..000000000 --- a/vendor/github.com/containers/image/v4/pkg/docker/config/config_linux.go +++ /dev/null @@ -1,115 +0,0 @@ -package config - -import ( - "fmt" - "strings" - - "github.com/containers/image/v4/internal/pkg/keyctl" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const keyDescribePrefix = "container-registry-login:" - -func getAuthFromKernelKeyring(registry string) (string, string, error) { - userkeyring, err := keyctl.UserKeyring() - if err != nil { - return "", "", err - } - key, err := userkeyring.Search(genDescription(registry)) - if err != nil { - return "", "", err - } - authData, err := key.Get() - if err != nil { - return "", "", err - } - parts := strings.SplitN(string(authData), "\x00", 2) - if len(parts) != 2 { - return "", "", nil - } - return parts[0], parts[1], nil -} - -func deleteAuthFromKernelKeyring(registry string) error { - userkeyring, err := keyctl.UserKeyring() - - if err != nil { - return err - } - key, err := userkeyring.Search(genDescription(registry)) - if err != nil { - return err - } - return key.Unlink() -} - -func removeAllAuthFromKernelKeyring() error { - keys, err := keyctl.ReadUserKeyring() - if err != nil { - return err - } - - userkeyring, err := keyctl.UserKeyring() - if err != nil { - return err - } - - for _, k := range keys { - keyAttr, err := k.Describe() - if err != nil { - return err - } - // split string "type;uid;gid;perm;description" - keyAttrs := strings.SplitN(keyAttr, ";", 5) - if len(keyAttrs) < 5 { - return errors.Errorf("Key attributes of %d are not avaliable", k.ID()) - } - keyDescribe := keyAttrs[4] - if strings.HasPrefix(keyDescribe, keyDescribePrefix) { - err 
:= keyctl.Unlink(userkeyring, k) - if err != nil { - return errors.Wrapf(err, "error unlinking key %d", k.ID()) - } - logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr) - } - } - return nil -} - -func setAuthToKernelKeyring(registry, username, password string) error { - keyring, err := keyctl.SessionKeyring() - if err != nil { - return err - } - id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password))) - if err != nil { - return err - } - - // sets all permission(view,read,write,search,link,set attribute) for current user - // it enables the user to search the key after it linked to user keyring and unlinked from session keyring - err = keyctl.SetPerm(id, keyctl.PermUserAll) - if err != nil { - return err - } - // link the key to userKeyring - userKeyring, err := keyctl.UserKeyring() - if err != nil { - return errors.Wrapf(err, "error getting user keyring") - } - err = keyctl.Link(userKeyring, id) - if err != nil { - return errors.Wrapf(err, "error linking the key to user keyring") - } - // unlink the key from session keyring - err = keyctl.Unlink(keyring, id) - if err != nil { - return errors.Wrapf(err, "error unlinking the key from session keyring") - } - return nil -} - -func genDescription(registry string) string { - return fmt.Sprintf("%s%s", keyDescribePrefix, registry) -} diff --git a/vendor/github.com/containers/image/v4/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v4/pkg/docker/config/config_unsupported.go deleted file mode 100644 index 9b0e8bee2..000000000 --- a/vendor/github.com/containers/image/v4/pkg/docker/config/config_unsupported.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !linux -// +build !386 !amd64 - -package config - -func getAuthFromKernelKeyring(registry string) (string, string, error) { - return "", "", ErrNotSupported -} - -func deleteAuthFromKernelKeyring(registry string) error { - return ErrNotSupported -} - -func setAuthToKernelKeyring(registry, username, password string) error { - return ErrNotSupported -} - -func removeAllAuthFromKernelKeyring() error { - return ErrNotSupported -} diff --git a/vendor/github.com/containers/image/v4/pkg/strslice/README.md b/vendor/github.com/containers/image/v4/pkg/strslice/README.md deleted file mode 100644 index ae6097e82..000000000 --- a/vendor/github.com/containers/image/v4/pkg/strslice/README.md +++ /dev/null @@ -1 +0,0 @@ -This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice). diff --git a/vendor/github.com/containers/image/v4/pkg/strslice/strslice.go b/vendor/github.com/containers/image/v4/pkg/strslice/strslice.go deleted file mode 100644 index bad493fb8..000000000 --- a/vendor/github.com/containers/image/v4/pkg/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. 
- return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/containers/image/v4/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v4/pkg/sysregistriesv2/system_registries_v2.go deleted file mode 100644 index 0cd60778f..000000000 --- a/vendor/github.com/containers/image/v4/pkg/sysregistriesv2/system_registries_v2.go +++ /dev/null @@ -1,483 +0,0 @@ -package sysregistriesv2 - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strings" - "sync" - - "github.com/BurntSushi/toml" - "github.com/containers/image/v4/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/containers/image/v4/docker/reference" -) - -// systemRegistriesConfPath is the path to the system-wide registry -// configuration file and is used to add/subtract potential registries for -// obtaining images. You can override this at build time with -// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path' -var systemRegistriesConfPath = builtinRegistriesConfPath - -// builtinRegistriesConfPath is the path to the registry configuration file. -// DO NOT change this, instead see systemRegistriesConfPath above. -const builtinRegistriesConfPath = "/etc/containers/registries.conf" - -// Endpoint describes a remote location of a registry. -type Endpoint struct { - // The endpoint's remote location. - Location string `toml:"location,omitempty"` - // If true, certs verification will be skipped and HTTP (non-TLS) - // connections will be allowed. - Insecure bool `toml:"insecure,omitempty"` -} - -// rewriteReference will substitute the provided reference `prefix` to the -// endpoints `location` from the `ref` and creates a new named reference from it. -// The function errors if the newly created reference is not parsable. -func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) { - refString := ref.String() - if !refMatchesPrefix(refString, prefix) { - return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString) - } - - newNamedRef := strings.Replace(refString, prefix, e.Location, 1) - newParsedRef, err := reference.ParseNamed(newNamedRef) - if err != nil { - return nil, errors.Wrapf(err, "error rewriting reference") - } - logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String()) - return newParsedRef, nil -} - -// Registry represents a registry. -type Registry struct { - // Prefix is used for matching images, and to translate one namespace to - // another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"` - // and we pull from "example.com/bar/myimage:latest", the image will - // effectively be pulled from "example.com/foo/bar/myimage:latest". - // If no Prefix is specified, it defaults to the specified location. - Prefix string `toml:"prefix"` - // A registry is an Endpoint too - Endpoint - // The registry's mirrors. - Mirrors []Endpoint `toml:"mirror,omitempty"` - // If true, pulling from the registry will be blocked. - Blocked bool `toml:"blocked,omitempty"` - // If true, mirrors will only be used for digest pulls. Pulling images by - // tag can potentially yield different images, depending on which endpoint - // we pull from. Forcing digest-pulls for mirrors avoids that issue. 
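As an aside on the StrSlice type deleted just above: a sketch of the string-or-array decoding it implements, with illustrative values.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/containers/image/v4/pkg/strslice"
)

func main() {
	var entry, cmd strslice.StrSlice

	// A bare string decodes to a single-element slice...
	if err := json.Unmarshal([]byte(`"/bin/sh -c date"`), &entry); err != nil {
		log.Fatal(err)
	}
	// ...while an array decodes element by element.
	if err := json.Unmarshal([]byte(`["/bin/sh", "-c", "date"]`), &cmd); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(entry), entry) // 1 [/bin/sh -c date]
	fmt.Println(len(cmd), cmd)     // 3 [/bin/sh -c date]
}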
- MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"` -} - -// PullSource consists of an Endpoint and a Reference. Note that the reference is -// rewritten according to the registries prefix and the Endpoint's location. -type PullSource struct { - Endpoint Endpoint - Reference reference.Named -} - -// PullSourcesFromReference returns a slice of PullSource's based on the passed -// reference. -func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) { - var endpoints []Endpoint - - if r.MirrorByDigestOnly { - // Only use mirrors when the reference is a digest one. - if _, isDigested := ref.(reference.Canonical); isDigested { - endpoints = append(r.Mirrors, r.Endpoint) - } else { - endpoints = []Endpoint{r.Endpoint} - } - } else { - endpoints = append(r.Mirrors, r.Endpoint) - } - - sources := []PullSource{} - for _, ep := range endpoints { - rewritten, err := ep.rewriteReference(ref, r.Prefix) - if err != nil { - return nil, err - } - sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten}) - } - - return sources, nil -} - -// V1TOMLregistries is for backwards compatibility to sysregistries v1 -type V1TOMLregistries struct { - Registries []string `toml:"registries"` -} - -// V1TOMLConfig is for backwards compatibility to sysregistries v1 -type V1TOMLConfig struct { - Search V1TOMLregistries `toml:"search"` - Insecure V1TOMLregistries `toml:"insecure"` - Block V1TOMLregistries `toml:"block"` -} - -// V1RegistriesConf is the sysregistries v1 configuration format. -type V1RegistriesConf struct { - V1TOMLConfig `toml:"registries"` -} - -// Nonempty returns true if config contains at least one configuration entry. -func (config *V1RegistriesConf) Nonempty() bool { - return (len(config.V1TOMLConfig.Search.Registries) != 0 || - len(config.V1TOMLConfig.Insecure.Registries) != 0 || - len(config.V1TOMLConfig.Block.Registries) != 0) -} - -// V2RegistriesConf is the sysregistries v2 configuration format. -type V2RegistriesConf struct { - Registries []Registry `toml:"registry"` - // An array of host[:port] (not prefix!) entries to use for resolving unqualified image references - UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"` -} - -// Nonempty returns true if config contains at least one configuration entry. -func (config *V2RegistriesConf) Nonempty() bool { - return (len(config.Registries) != 0 || - len(config.UnqualifiedSearchRegistries) != 0) -} - -// tomlConfig is the data type used to unmarshal the toml config. -type tomlConfig struct { - V2RegistriesConf - V1RegistriesConf // for backwards compatibility with sysregistries v1 -} - -// InvalidRegistries represents an invalid registry configurations. An example -// is when "registry.com" is defined multiple times in the configuration but -// with conflicting security settings. -type InvalidRegistries struct { - s string -} - -// Error returns the error string. -func (e *InvalidRegistries) Error() string { - return e.s -} - -// parseLocation parses the input string, performs some sanity checks and returns -// the sanitized input string. An error is returned if the input string is -// empty or if contains an "http{s,}://" prefix. 
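To make the registry/mirror configuration concrete, here is a self-contained sketch that parses a small registries.conf fragment using local stand-ins for the deleted types; all names and locations are hypothetical.

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// Local mirrors of the configuration shape shown above, for a standalone sketch.
type Endpoint struct {
	Location string `toml:"location"`
	Insecure bool   `toml:"insecure"`
}

type Registry struct {
	Endpoint
	Prefix             string     `toml:"prefix"`
	Mirrors            []Endpoint `toml:"mirror"`
	Blocked            bool       `toml:"blocked"`
	MirrorByDigestOnly bool       `toml:"mirror-by-digest-only"`
}

type registriesConf struct {
	Registries []Registry `toml:"registry"`
}

func main() {
	const fragment = `
[[registry]]
location = "registry.example.com"
prefix = "registry.example.com"
mirror-by-digest-only = true

[[registry.mirror]]
location = "mirror.example.com"
`
	var c registriesConf
	if err := toml.Unmarshal([]byte(fragment), &c); err != nil {
		log.Fatal(err)
	}
	r := c.Registries[0]
	fmt.Printf("%s: digest pulls may use mirror %s\n", r.Location, r.Mirrors[0].Location)
}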
-func parseLocation(input string) (string, error) { - trimmed := strings.TrimRight(input, "/") - - if trimmed == "" { - return "", &InvalidRegistries{s: "invalid location: cannot be empty"} - } - - if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { - msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input) - return "", &InvalidRegistries{s: msg} - } - - return trimmed, nil -} - -// ConvertToV2 returns a v2 config corresponding to a v1 one. -func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) { - regMap := make(map[string]*Registry) - // The order of the registries is not really important, but make it deterministic (the same for the same config file) - // to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations. - registryOrder := []string{} - - getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object - var err error - location, err = parseLocation(location) - if err != nil { - return nil, err - } - reg, exists := regMap[location] - if !exists { - reg = &Registry{ - Endpoint: Endpoint{Location: location}, - Mirrors: []Endpoint{}, - Prefix: location, - } - regMap[location] = reg - registryOrder = append(registryOrder, location) - } - return reg, nil - } - - for _, blocked := range config.V1TOMLConfig.Block.Registries { - reg, err := getRegistry(blocked) - if err != nil { - return nil, err - } - reg.Blocked = true - } - for _, insecure := range config.V1TOMLConfig.Insecure.Registries { - reg, err := getRegistry(insecure) - if err != nil { - return nil, err - } - reg.Insecure = true - } - - res := &V2RegistriesConf{ - UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries, - } - for _, location := range registryOrder { - reg := regMap[location] - res.Registries = append(res.Registries, *reg) - } - return res, nil -} - -// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries. -var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$") - -// postProcess checks the consistency of all the configuration, looks for conflicts, -// and normalizes the configuration (e.g., sets the Prefix to Location if not set). -func (config *V2RegistriesConf) postProcess() error { - regMap := make(map[string][]*Registry) - - for i := range config.Registries { - reg := &config.Registries[i] - // make sure Location and Prefix are valid - var err error - reg.Location, err = parseLocation(reg.Location) - if err != nil { - return err - } - - if reg.Prefix == "" { - reg.Prefix = reg.Location - } else { - reg.Prefix, err = parseLocation(reg.Prefix) - if err != nil { - return err - } - } - - // make sure mirrors are valid - for _, mir := range reg.Mirrors { - mir.Location, err = parseLocation(mir.Location) - if err != nil { - return err - } - } - regMap[reg.Location] = append(regMap[reg.Location], reg) - } - - // Given a registry can be mentioned multiple times (e.g., to have - // multiple prefixes backed by different mirrors), we need to make sure - // there are no conflicts among them. - // - // Note: we need to iterate over the registries array to ensure a - // deterministic behavior which is not guaranteed by maps. 
- for _, reg := range config.Registries { - others, _ := regMap[reg.Location] - for _, other := range others { - if reg.Insecure != other.Insecure { - msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location) - return &InvalidRegistries{s: msg} - } - if reg.Blocked != other.Blocked { - msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location) - return &InvalidRegistries{s: msg} - } - } - } - - for i := range config.UnqualifiedSearchRegistries { - registry, err := parseLocation(config.UnqualifiedSearchRegistries[i]) - if err != nil { - return err - } - if !anchoredDomainRegexp.MatchString(registry) { - return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)} - } - config.UnqualifiedSearchRegistries[i] = registry - } - - return nil -} - -// ConfigPath returns the path to the system-wide registry configuration file. -func ConfigPath(ctx *types.SystemContext) string { - confPath := systemRegistriesConfPath - if ctx != nil { - if ctx.SystemRegistriesConfPath != "" { - confPath = ctx.SystemRegistriesConfPath - } else if ctx.RootForImplicitAbsolutePaths != "" { - confPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath) - } - } - return confPath -} - -// configMutex is used to synchronize concurrent accesses to configCache. -var configMutex = sync.Mutex{} - -// configCache caches already loaded configs with config paths as keys and is -// used to avoid redudantly parsing configs. Concurrent accesses to the cache -// are synchronized via configMutex. -var configCache = make(map[string]*V2RegistriesConf) - -// InvalidateCache invalidates the registry cache. This function is meant to be -// used for long-running processes that need to reload potential changes made to -// the cached registry config files. -func InvalidateCache() { - configMutex.Lock() - defer configMutex.Unlock() - configCache = make(map[string]*V2RegistriesConf) -} - -// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached. -func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) { - configPath := ConfigPath(ctx) - - configMutex.Lock() - // if the config has already been loaded, return the cached registries - if config, inCache := configCache[configPath]; inCache { - configMutex.Unlock() - return config, nil - } - configMutex.Unlock() - - return TryUpdatingCache(ctx) -} - -// TryUpdatingCache loads the configuration from the provided `SystemContext` -// without using the internal cache. On success, the loaded configuration will -// be added into the internal registry cache. -func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) { - configPath := ConfigPath(ctx) - - configMutex.Lock() - defer configMutex.Unlock() - - // load the config - config, err := loadRegistryConf(configPath) - if err != nil { - // Return an empty []Registry if we use the default config, - // which implies that the config path of the SystemContext - // isn't set. Note: if ctx.SystemRegistriesConfPath points to - // the default config, we will still return an error. 
- if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") { - return &V2RegistriesConf{Registries: []Registry{}}, nil - } - return nil, err - } - - v2Config := &config.V2RegistriesConf - - // backwards compatibility for v1 configs - if config.V1RegistriesConf.Nonempty() { - if config.V2RegistriesConf.Nonempty() { - return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"} - } - v2, err := config.V1RegistriesConf.ConvertToV2() - if err != nil { - return nil, err - } - v2Config = v2 - } - - if err := v2Config.postProcess(); err != nil { - return nil, err - } - - // populate the cache - configCache[configPath] = v2Config - return v2Config, nil -} - -// GetRegistries loads and returns the registries specified in the config. -// Note the parsed content of registry config files is cached. For reloading, -// use `InvalidateCache` and re-call `GetRegistries`. -func GetRegistries(ctx *types.SystemContext) ([]Registry, error) { - config, err := getConfig(ctx) - if err != nil { - return nil, err - } - return config.Registries, nil -} - -// UnqualifiedSearchRegistries returns a list of host[:port] entries to try -// for unqualified image search, in the returned order) -func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) { - config, err := getConfig(ctx) - if err != nil { - return nil, err - } - return config.UnqualifiedSearchRegistries, nil -} - -// refMatchesPrefix returns true iff ref, -// which is a registry, repository namespace, repository or image reference (as formatted by -// reference.Domain(), reference.Named.Name() or reference.Reference.String() -// — note that this requires the name to start with an explicit hostname!), -// matches a Registry.Prefix value. -// (This is split from the caller primarily to make testing easier.) -func refMatchesPrefix(ref, prefix string) bool { - switch { - case len(ref) < len(prefix): - return false - case len(ref) == len(prefix): - return ref == prefix - case len(ref) > len(prefix): - if !strings.HasPrefix(ref, prefix) { - return false - } - c := ref[len(prefix)] - // This allows "example.com:5000" to match "example.com", - // which is unintended; that will get fixed eventually, DON'T RELY - // ON THE CURRENT BEHAVIOR. - return c == ':' || c == '/' || c == '@' - default: - panic("Internal error: impossible comparison outcome") - } -} - -// FindRegistry returns the Registry with the longest prefix for ref, -// which is a registry, repository namespace repository or image reference (as formatted by -// reference.Domain(), reference.Named.Name() or reference.Reference.String() -// — note that this requires the name to start with an explicit hostname!). -// If no Registry prefixes the image, nil is returned. -func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) { - config, err := getConfig(ctx) - if err != nil { - return nil, err - } - - reg := Registry{} - prefixLen := 0 - for _, r := range config.Registries { - if refMatchesPrefix(ref, r.Prefix) { - length := len(r.Prefix) - if length > prefixLen { - reg = r - prefixLen = length - } - } - } - if prefixLen != 0 { - return ®, nil - } - return nil, nil -} - -// Loads the registry configuration file from the filesystem and then unmarshals -// it. Returns the unmarshalled object. 
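The longest-prefix selection in FindRegistry above can be restated compactly. This standalone sketch mirrors the matching rule, including the documented "example.com:5000" caveat, with made-up references:

package main

import (
	"fmt"
	"strings"
)

// refMatchesPrefix, restated: a prefix matches either exactly or at a
// path/tag/digest boundary (':', '/', '@'). Note the caveat documented above
// that "example.com:5000" currently matches the prefix "example.com".
func refMatchesPrefix(ref, prefix string) bool {
	if !strings.HasPrefix(ref, prefix) {
		return false
	}
	if len(ref) == len(prefix) {
		return true
	}
	c := ref[len(prefix)]
	return c == ':' || c == '/' || c == '@'
}

func main() {
	ref := "registry.example.com/team/image:latest"
	// Longest matching prefix wins, as in FindRegistry.
	best := ""
	for _, p := range []string{"registry.example.com", "registry.example.com/team"} {
		if refMatchesPrefix(ref, p) && len(p) > len(best) {
			best = p
		}
	}
	fmt.Println(best) // registry.example.com/team
}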
-func loadRegistryConf(configPath string) (*tomlConfig, error) { - config := &tomlConfig{} - - configBytes, err := ioutil.ReadFile(configPath) - if err != nil { - return nil, err - } - - err = toml.Unmarshal(configBytes, &config) - return config, err -} diff --git a/vendor/github.com/containers/image/v4/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v4/pkg/tlsclientconfig/tlsclientconfig.go deleted file mode 100644 index 6785564e8..000000000 --- a/vendor/github.com/containers/image/v4/pkg/tlsclientconfig/tlsclientconfig.go +++ /dev/null @@ -1,112 +0,0 @@ -package tlsclientconfig - -import ( - "crypto/tls" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc -func SetupCertificates(dir string, tlsc *tls.Config) error { - logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - if os.IsPermission(err) { - logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) - return nil - } - return err - } - - for _, f := range fs { - fullPath := filepath.Join(dir, f.Name()) - if strings.HasSuffix(f.Name(), ".crt") { - logrus.Debugf(" crt: %s", fullPath) - data, err := ioutil.ReadFile(fullPath) - if err != nil { - if os.IsNotExist(err) { - // Dangling symbolic link? - // Race with someone who deleted the - // file after we read the directory's - // list of contents? - logrus.Warnf("error reading certificate %q: %v", fullPath, err) - continue - } - return err - } - if tlsc.RootCAs == nil { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return errors.Wrap(err, "unable to get system cert pool") - } - tlsc.RootCAs = systemPool - } - tlsc.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf(" cert: %s", fullPath) - if !hasFile(fs, keyName) { - return errors.Errorf("missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) - if err != nil { - return err - } - tlsc.Certificates = append(tlsc.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf(" key: %s", fullPath) - if !hasFile(fs, certName) { - return errors.Errorf("missing client certificate %s for key %s", certName, keyName) - } - } - } - return nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// NewTransport Creates a default transport -func NewTransport() *http.Transport { - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: direct.Dial, - TLSHandshakeTimeout: 10 * time.Second, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - proxyDialer, err := sockets.DialerFromEnvironment(direct) - if err == nil { - tr.Dial = proxyDialer.Dial - } - return tr -} diff --git a/vendor/github.com/containers/image/v4/signature/docker.go b/vendor/github.com/containers/image/v4/signature/docker.go deleted file mode 100644 index c3ac33d48..000000000 --- a/vendor/github.com/containers/image/v4/signature/docker.go +++ /dev/null @@ -1,65 +0,0 @@ -// Note: Consider the API unstable until the code supports at least three different image formats or transports. - -package signature - -import ( - "fmt" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/manifest" - "github.com/opencontainers/go-digest" -) - -// SignDockerManifest returns a signature for manifest as the specified dockerReference, -// using mech and keyIdentity. -func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { - manifestDigest, err := manifest.Digest(m) - if err != nil { - return nil, err - } - sig := newUntrustedSignature(manifestDigest, dockerReference) - return sig.sign(mech, keyIdentity) -} - -// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference, -// using mech. 
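A hedged usage sketch of the signing entry points deleted here. It assumes a local GPG keyring that actually contains the named secret key; the fingerprint, image reference, and manifest path are all placeholders.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/containers/image/v4/signature"
)

func main() {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		log.Fatal(err)
	}
	defer mech.Close()

	manifest, err := ioutil.ReadFile("manifest.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	const keyFingerprint = "0123456789ABCDEF0123456789ABCDEF01234567" // placeholder

	sig, err := signature.SignDockerManifest(manifest, "docker.io/library/busybox:latest", mech, keyFingerprint)
	if err != nil {
		log.Fatal(err)
	}

	verified, err := signature.VerifyDockerManifestSignature(sig, manifest, "docker.io/library/busybox:latest", mech, keyFingerprint)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(verified.DockerReference, verified.DockerManifestDigest)
}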
-func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte, - expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) { - expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference) - if err != nil { - return nil, err - } - sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{ - validateKeyIdentity: func(keyIdentity string) error { - if keyIdentity != expectedKeyIdentity { - return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)} - } - return nil - }, - validateSignedDockerReference: func(signedDockerReference string) error { - signedRef, err := reference.ParseNormalizedNamed(signedDockerReference) - if err != nil { - return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)} - } - if signedRef.String() != expectedRef.String() { - return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s", - signedDockerReference, expectedDockerReference)} - } - return nil - }, - validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error { - matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest) - if err != nil { - return err - } - if !matches { - return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)} - } - return nil - }, - }) - if err != nil { - return nil, err - } - return sig, nil -} diff --git a/vendor/github.com/containers/image/v4/signature/json.go b/vendor/github.com/containers/image/v4/signature/json.go deleted file mode 100644 index 9e592863d..000000000 --- a/vendor/github.com/containers/image/v4/signature/json.go +++ /dev/null @@ -1,88 +0,0 @@ -package signature - -import ( - "bytes" - "encoding/json" - "fmt" - "io" -) - -// jsonFormatError is returned when JSON does not match expected format. -type jsonFormatError string - -func (err jsonFormatError) Error() string { - return string(err) -} - -// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect -// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to -// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected. -// -// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy, -// we could use reflection to automate this. Later? -func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error { - seenKeys := map[string]struct{}{} - - dec := json.NewDecoder(bytes.NewReader(data)) - t, err := dec.Token() - if err != nil { - return jsonFormatError(err.Error()) - } - if t != json.Delim('{') { - return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t)) - } - for { - t, err := dec.Token() - if err != nil { - return jsonFormatError(err.Error()) - } - if t == json.Delim('}') { - break - } - - key, ok := t.(string) - if !ok { - // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state. 
- return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
- }
- if _, ok := seenKeys[key]; ok {
- return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
- }
- seenKeys[key] = struct{}{}
-
- valuePtr := fieldResolver(key)
- if valuePtr == nil {
- return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
- }
- // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
- if err := dec.Decode(valuePtr); err != nil {
- return jsonFormatError(err.Error())
- }
- }
- if _, err := dec.Token(); err != io.EOF {
- return jsonFormatError("Unexpected data after JSON object")
- }
- return nil
-}
-
-// paranoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but failing on the slightest unexpected aspect
-// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
-// must be present exactly once, and no other fields are accepted.
-func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
- seenKeys := map[string]struct{}{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
- if valuePtr, ok := exactFields[key]; ok {
- seenKeys[key] = struct{}{}
- return valuePtr
- }
- return nil
- }); err != nil {
- return err
- }
- for key := range exactFields {
- if _, ok := seenKeys[key]; !ok {
- return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
- }
- }
- return nil
-}
diff --git a/vendor/github.com/containers/image/v4/signature/mechanism.go b/vendor/github.com/containers/image/v4/signature/mechanism.go
deleted file mode 100644
index bdf26c531..000000000
--- a/vendor/github.com/containers/image/v4/signature/mechanism.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Note: Consider the API unstable until the code supports at least three different image formats or transports.
-
-package signature
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io/ioutil"
- "strings"
-
- "golang.org/x/crypto/openpgp"
-)
-
-// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
-// Each mechanism should eventually be closed by calling Close().
-// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
-// eliminate ambiguities, support CA signatures and perhaps other key properties)
-type SigningMechanism interface {
- // Close removes resources associated with the mechanism, if any.
- Close() error
- // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
- SupportsSigning() error
- // Sign creates a (non-detached) signature of input using keyIdentity.
- // Fails with a SigningNotSupportedError if the mechanism does not support signing.
- Sign(input []byte, keyIdentity string) ([]byte, error)
- // Verify parses unverifiedSignature and returns the content and the signer's identity
- Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
- // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
- // along with a short identifier of the key used for signing.
- // WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
- // is NOT the same as a "key identity" used in other calls to this interface, and
- // the values may have no recognizable relationship if the public key is not available.
- UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) -} - -// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that. -type SigningNotSupportedError string - -func (err SigningNotSupportedError) Error() string { - return string(err) -} - -// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default -// GPG configuration ($GNUPGHOME / ~/.gnupg) -// The caller must call .Close() on the returned SigningMechanism. -func NewGPGSigningMechanism() (SigningMechanism, error) { - return newGPGSigningMechanismInDirectory("") -} - -// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which -// recognizes _only_ public keys from the supplied blob, and returns the identities -// of these keys. -// The caller must call .Close() on the returned SigningMechanism. -func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - return newEphemeralGPGSigningMechanism(blob) -} - -// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, -// along with a short identifier of the key used for signing. -// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys) -// is NOT the same as a "key identity" used in other calls ot this interface, and -// the values may have no recognizable relationship if the public key is not available. -func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { - // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography. - md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil) - if err != nil { - return nil, "", err - } - if !md.IsSigned { - return nil, "", errors.New("The input is not a signature") - } - content, err := ioutil.ReadAll(md.UnverifiedBody) - if err != nil { - // Coverage: An error during reading the body can happen only if - // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key - // to decrypt the contents anyway), or - // 2) the message is signed AND we give ReadMessage a correspnding public key, which we don’t. - return nil, "", err - } - - // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints - // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)! - return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil -} diff --git a/vendor/github.com/containers/image/v4/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v4/signature/mechanism_gpgme.go deleted file mode 100644 index 4825ab27c..000000000 --- a/vendor/github.com/containers/image/v4/signature/mechanism_gpgme.go +++ /dev/null @@ -1,175 +0,0 @@ -// +build !containers_image_openpgp - -package signature - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - - "github.com/mtrmac/gpgme" -) - -// A GPG/OpenPGP signing mechanism, implemented using gpgme. -type gpgmeSigningMechanism struct { - ctx *gpgme.Context - ephemeralDir string // If not "", a directory to be removed on Close() -} - -// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. -// The caller must call .Close() on the returned SigningMechanism. 
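Taken together with the manifest helpers earlier in this patch, these constructors form a sign/verify round trip. A minimal sketch, assuming a key with the given (placeholder) fingerprint exists in the default GPG keyring; after this bump the import path becomes github.com/containers/image/v5/signature:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/v5/signature"
)

func main() {
	manifestBytes, err := ioutil.ReadFile("manifest.json") // hypothetical input
	if err != nil {
		panic(err)
	}
	const ref = "docker.io/library/busybox:latest"
	const fingerprint = "0123456789ABCDEF0123456789ABCDEF01234567" // placeholder

	mech, err := signature.NewGPGSigningMechanism() // default $GNUPGHOME / ~/.gnupg
	if err != nil {
		panic(err)
	}
	defer mech.Close()

	sigBytes, err := signature.SignDockerManifest(manifestBytes, ref, mech, fingerprint)
	if err != nil {
		panic(err)
	}
	// Verification re-checks the key identity, the signed reference and the manifest digest.
	sig, err := signature.VerifyDockerManifestSignature(sigBytes, manifestBytes, ref, mech, fingerprint)
	if err != nil {
		panic(err)
	}
	fmt.Println("verified reference:", sig.DockerReference)
}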
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { - ctx, err := newGPGMEContext(optionalDir) - if err != nil { - return nil, err - } - return &gpgmeSigningMechanism{ - ctx: ctx, - ephemeralDir: "", - }, nil -} - -// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which -// recognizes _only_ public keys from the supplied blob, and returns the identities -// of these keys. -// The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") - if err != nil { - return nil, nil, err - } - removeDir := true - defer func() { - if removeDir { - os.RemoveAll(dir) - } - }() - ctx, err := newGPGMEContext(dir) - if err != nil { - return nil, nil, err - } - mech := &gpgmeSigningMechanism{ - ctx: ctx, - ephemeralDir: dir, - } - keyIdentities, err := mech.importKeysFromBytes(blob) - if err != nil { - return nil, nil, err - } - - removeDir = false - return mech, keyIdentities, nil -} - -// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty. -func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { - ctx, err := gpgme.New() - if err != nil { - return nil, err - } - if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { - return nil, err - } - if optionalDir != "" { - err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) - if err != nil { - return nil, err - } - } - ctx.SetArmor(false) - ctx.SetTextMode(false) - return ctx, nil -} - -func (m *gpgmeSigningMechanism) Close() error { - if m.ephemeralDir != "" { - os.RemoveAll(m.ephemeralDir) // Ignore an error, if any - } - return nil -} - -// importKeysFromBytes imports public keys from the supplied blob and returns their identities. -// The blob is assumed to have an appropriate format (the caller is expected to know which one). -// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); -// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. -func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { - inputData, err := gpgme.NewDataBytes(blob) - if err != nil { - return nil, err - } - res, err := m.ctx.Import(inputData) - if err != nil { - return nil, err - } - keyIdentities := []string{} - for _, i := range res.Imports { - if i.Result == nil { - keyIdentities = append(keyIdentities, i.Fingerprint) - } - } - return keyIdentities, nil -} - -// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. -func (m *gpgmeSigningMechanism) SupportsSigning() error { - return nil -} - -// Sign creates a (non-detached) signature of input using keyIdentity. -// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
-func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { - key, err := m.ctx.GetKey(keyIdentity, true) - if err != nil { - return nil, err - } - inputData, err := gpgme.NewDataBytes(input) - if err != nil { - return nil, err - } - var sigBuffer bytes.Buffer - sigData, err := gpgme.NewDataWriter(&sigBuffer) - if err != nil { - return nil, err - } - if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { - return nil, err - } - return sigBuffer.Bytes(), nil -} - -// Verify parses unverifiedSignature and returns the content and the signer's identity -func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { - signedBuffer := bytes.Buffer{} - signedData, err := gpgme.NewDataWriter(&signedBuffer) - if err != nil { - return nil, "", err - } - unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature) - if err != nil { - return nil, "", err - } - _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData) - if err != nil { - return nil, "", err - } - if len(sigs) != 1 { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))} - } - sig := sigs[0] - // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves - if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage { - // FIXME: Better error reporting eventually - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)} - } - return signedBuffer.Bytes(), sig.Fingerprint, nil -} - -// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, -// along with a short identifier of the key used for signing. -// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys) -// is NOT the same as a "key identity" used in other calls ot this interface, and -// the values may have no recognizable relationship if the public key is not available. -func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { - return gpgUntrustedSignatureContents(untrustedSignature) -} diff --git a/vendor/github.com/containers/image/v4/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v4/signature/mechanism_openpgp.go deleted file mode 100644 index eccd610c9..000000000 --- a/vendor/github.com/containers/image/v4/signature/mechanism_openpgp.go +++ /dev/null @@ -1,159 +0,0 @@ -// +build containers_image_openpgp - -package signature - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "os" - "path" - "strings" - "time" - - "github.com/containers/storage/pkg/homedir" - "golang.org/x/crypto/openpgp" -) - -// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp. -type openpgpSigningMechanism struct { - keyring openpgp.EntityList -} - -// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. -// The caller must call .Close() on the returned SigningMechanism. 
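This gpgme-backed implementation is compiled out when the containers_image_openpgp build tag is set (the pure-Go, verify-only implementation in the next file is used instead), so callers that intend to sign should probe the mechanism first. A minimal sketch:

func signPayload(payload []byte, keyFingerprint string) ([]byte, error) {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		return nil, err
	}
	defer mech.Close()
	// Returns nil only if this build can sign; binaries built with
	// -tags containers_image_openpgp get a SigningNotSupportedError here.
	if err := mech.SupportsSigning(); err != nil {
		return nil, err
	}
	return mech.Sign(payload, keyFingerprint) // keyFingerprint is a placeholder
}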
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { - m := &openpgpSigningMechanism{ - keyring: openpgp.EntityList{}, - } - - gpgHome := optionalDir - if gpgHome == "" { - gpgHome = os.Getenv("GNUPGHOME") - if gpgHome == "" { - gpgHome = path.Join(homedir.Get(), ".gnupg") - } - } - - pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - } else { - _, err := m.importKeysFromBytes(pubring) - if err != nil { - return nil, err - } - } - return m, nil -} - -// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which -// recognizes _only_ public keys from the supplied blob, and returns the identities -// of these keys. -// The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - m := &openpgpSigningMechanism{ - keyring: openpgp.EntityList{}, - } - keyIdentities, err := m.importKeysFromBytes(blob) - if err != nil { - return nil, nil, err - } - return m, keyIdentities, nil -} - -func (m *openpgpSigningMechanism) Close() error { - return nil -} - -// importKeysFromBytes imports public keys from the supplied blob and returns their identities. -// The blob is assumed to have an appropriate format (the caller is expected to know which one). -func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { - keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob)) - if err != nil { - k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob)) - if e2 != nil { - return nil, err // The original error -- FIXME: is this better? - } - keyring = k - } - - keyIdentities := []string{} - for _, entity := range keyring { - if entity.PrimaryKey == nil { - // Coverage: This should never happen, openpgp.ReadEntity fails with a - // openpgp.errors.StructuralError instead of returning an entity with this - // field set to nil. - continue - } - // Uppercase the fingerprint to be compatible with gpgme - keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))) - m.keyring = append(m.keyring, entity) - } - return keyIdentities, nil -} - -// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. -func (m *openpgpSigningMechanism) SupportsSigning() error { - return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") -} - -// Sign creates a (non-detached) signature of input using keyIdentity. -// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
-func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { - return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") -} - -// Verify parses unverifiedSignature and returns the content and the signer's identity -func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { - md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil) - if err != nil { - return nil, "", err - } - if !md.IsSigned { - return nil, "", errors.New("not signed") - } - content, err := ioutil.ReadAll(md.UnverifiedBody) - if err != nil { - // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted - // (and possibly also signed, but it _must_ be encrypted) and the signing - // “modification detection code” detects a mismatch. But in that case, - // we would expect the signature verification to fail as well, and that is checked - // first. Besides, we are not supplying any decryption keys, so we really - // can never reach this “encrypted data MDC mismatch” path. - return nil, "", err - } - if md.SignatureError != nil { - return nil, "", fmt.Errorf("signature error: %v", md.SignatureError) - } - if md.SignedBy == nil { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)} - } - if md.Signature != nil { - if md.Signature.SigLifetimeSecs != nil { - expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second) - if time.Now().After(expiry) { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)} - } - } - } else if md.SignatureV3 == nil { - // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3, - // or sets md.SignatureError. - return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"} - } - - // Uppercase the fingerprint to be compatible with gpgme - return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil -} - -// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, -// along with a short identifier of the key used for signing. -// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys) -// is NOT the same as a "key identity" used in other calls ot this interface, and -// the values may have no recognizable relationship if the public key is not available. -func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { - return gpgUntrustedSignatureContents(untrustedSignature) -} diff --git a/vendor/github.com/containers/image/v4/signature/policy_config.go b/vendor/github.com/containers/image/v4/signature/policy_config.go deleted file mode 100644 index bb229f5f1..000000000 --- a/vendor/github.com/containers/image/v4/signature/policy_config.go +++ /dev/null @@ -1,688 +0,0 @@ -// policy_config.go hanles creation of policy objects, either by parsing JSON -// or by programs building them programmatically. - -// The New* constructors are intended to be a stable API. FIXME: after an independent review. - -// Do not invoke the internals of the JSON marshaling/unmarshaling directly. 
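Both mechanism implementations above resolve the keyring the same way: an explicit directory wins, then $GNUPGHOME, then ~/.gnupg (this pure-Go build reads pubring.gpg from that directory). A sketch of pointing verification at a dedicated, pre-populated keyring via the environment (the directory path is hypothetical):

func verifyWithKeyring(gpgHome string, sigBytes []byte) (contents []byte, keyIdentity string, err error) {
	// E.g. gpgHome = "/etc/containers/gpg-home", populated out of band.
	if err := os.Setenv("GNUPGHOME", gpgHome); err != nil {
		return nil, "", err
	}
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		return nil, "", err
	}
	defer mech.Close()
	return mech.Verify(sigBytes)
}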
- -// We can't just blindly call json.Unmarshal because that would silently ignore -// typos, and that would just not do for security policy. - -// FIXME? This is by no means an user-friendly parser: No location information in error messages, no other context. -// But at least it is not worse than blind json.Unmarshal()… - -package signature - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "path/filepath" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" - "github.com/pkg/errors" -) - -// systemDefaultPolicyPath is the policy path used for DefaultPolicy(). -// You can override this at build time with -// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path' -var systemDefaultPolicyPath = builtinDefaultPolicyPath - -// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). -// DO NOT change this, instead see systemDefaultPolicyPath above. -const builtinDefaultPolicyPath = "/etc/containers/policy.json" - -// InvalidPolicyFormatError is returned when parsing an invalid policy configuration. -type InvalidPolicyFormatError string - -func (err InvalidPolicyFormatError) Error() string { - return string(err) -} - -// DefaultPolicy returns the default policy of the system. -// Most applications should be using this method to get the policy configured -// by the system administrator. -// sys should usually be nil, can be set to override the default. -// NOTE: When this function returns an error, report it to the user and abort. -// DO NOT hard-code fallback policies in your application. -func DefaultPolicy(sys *types.SystemContext) (*Policy, error) { - return NewPolicyFromFile(defaultPolicyPath(sys)) -} - -// defaultPolicyPath returns a path to the default policy of the system. -func defaultPolicyPath(sys *types.SystemContext) string { - if sys != nil { - if sys.SignaturePolicyPath != "" { - return sys.SignaturePolicyPath - } - if sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath) - } - } - return systemDefaultPolicyPath -} - -// NewPolicyFromFile returns a policy configured in the specified file. -func NewPolicyFromFile(fileName string) (*Policy, error) { - contents, err := ioutil.ReadFile(fileName) - if err != nil { - return nil, err - } - policy, err := NewPolicyFromBytes(contents) - if err != nil { - return nil, errors.Wrapf(err, "invalid policy in %q", fileName) - } - return policy, nil -} - -// NewPolicyFromBytes returns a policy parsed from the specified blob. -// Use this function instead of calling json.Unmarshal directly. -func NewPolicyFromBytes(data []byte) (*Policy, error) { - p := Policy{} - if err := json.Unmarshal(data, &p); err != nil { - return nil, InvalidPolicyFormatError(err.Error()) - } - return &p, nil -} - -// Compile-time check that Policy implements json.Unmarshaler. -var _ json.Unmarshaler = (*Policy)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. 
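The JSON these constructors accept mirrors the requirement and reference-match types defined below. A minimal, hypothetical policy that rejects everything except one repository might be parsed like this:

func examplePolicy() (*signature.Policy, error) {
	// Sketch only; real policies normally live in /etc/containers/policy.json.
	data := []byte(`{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "docker.io/library/busybox": [
	                {"type": "insecureAcceptAnything"}
	            ]
	        }
	    }
	}`)
	return signature.NewPolicyFromBytes(data)
}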
-func (p *Policy) UnmarshalJSON(data []byte) error { - *p = Policy{} - transports := policyTransportsMap{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - switch key { - case "default": - return &p.Default - case "transports": - return &transports - default: - return nil - } - }); err != nil { - return err - } - - if p.Default == nil { - return InvalidPolicyFormatError("Default policy is missing") - } - p.Transports = map[string]PolicyTransportScopes(transports) - return nil -} - -// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member. -type policyTransportsMap map[string]PolicyTransportScopes - -// Compile-time check that policyTransportsMap implements json.Unmarshaler. -var _ json.Unmarshaler = (*policyTransportsMap)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *policyTransportsMap) UnmarshalJSON(data []byte) error { - // We can't unmarshal directly into map values because it is not possible to take an address of a map value. - // So, use a temporary map of pointers-to-slices and convert. - tmpMap := map[string]*PolicyTransportScopes{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - // transport can be nil - transport := transports.Get(key) - // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. - if _, ok := tmpMap[key]; ok { - return nil - } - ptsWithTransport := policyTransportScopesWithTransport{ - transport: transport, - dest: &PolicyTransportScopes{}, // This allocates a new instance on each call. - } - tmpMap[key] = ptsWithTransport.dest - return &ptsWithTransport - }); err != nil { - return err - } - for key, ptr := range tmpMap { - (*m)[key] = *ptr - } - return nil -} - -// Compile-time check that PolicyTransportScopes "implements"" json.Unmarshaler. -// we want to only use policyTransportScopesWithTransport -var _ json.Unmarshaler = (*PolicyTransportScopes)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error { - return errors.New("Do not try to unmarshal PolicyTransportScopes directly") -} - -// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes -// while validating using a specific ImageTransport if not nil. -type policyTransportScopesWithTransport struct { - transport types.ImageTransport - dest *PolicyTransportScopes -} - -// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler. -var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error { - // We can't unmarshal directly into map values because it is not possible to take an address of a map value. - // So, use a temporary map of pointers-to-slices and convert. - tmpMap := map[string]*PolicyRequirements{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. - if _, ok := tmpMap[key]; ok { - return nil - } - if key != "" && m.transport != nil { - if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil { - return nil - } - } - ptr := &PolicyRequirements{} // This allocates a new instance on each call. 
- tmpMap[key] = ptr - return ptr - }); err != nil { - return err - } - for key, ptr := range tmpMap { - (*m.dest)[key] = *ptr - } - return nil -} - -// Compile-time check that PolicyRequirements implements json.Unmarshaler. -var _ json.Unmarshaler = (*PolicyRequirements)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { - reqJSONs := []json.RawMessage{} - if err := json.Unmarshal(data, &reqJSONs); err != nil { - return err - } - if len(reqJSONs) == 0 { - return InvalidPolicyFormatError("List of verification policy requirements must not be empty") - } - res := make([]PolicyRequirement, len(reqJSONs)) - for i, reqJSON := range reqJSONs { - req, err := newPolicyRequirementFromJSON(reqJSON) - if err != nil { - return err - } - res[i] = req - } - *m = res - return nil -} - -// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. -func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { - var typeField prCommon - if err := json.Unmarshal(data, &typeField); err != nil { - return nil, err - } - var res PolicyRequirement - switch typeField.Type { - case prTypeInsecureAcceptAnything: - res = &prInsecureAcceptAnything{} - case prTypeReject: - res = &prReject{} - case prTypeSignedBy: - res = &prSignedBy{} - case prTypeSignedBaseLayer: - res = &prSignedBaseLayer{} - default: - return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) - } - if err := json.Unmarshal(data, &res); err != nil { - return nil, err - } - return res, nil -} - -// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. -func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { - return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} -} - -// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. -func NewPRInsecureAcceptAnything() PolicyRequirement { - return newPRInsecureAcceptAnything() -} - -// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. -var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { - *pr = prInsecureAcceptAnything{} - var tmp prInsecureAcceptAnything - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prTypeInsecureAcceptAnything { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *pr = *newPRInsecureAcceptAnything() - return nil -} - -// newPRReject is NewPRReject, except it returns the private type. -func newPRReject() *prReject { - return &prReject{prCommon{Type: prTypeReject}} -} - -// NewPRReject returns a new "reject" PolicyRequirement. -func NewPRReject() PolicyRequirement { - return newPRReject() -} - -// Compile-time check that prReject implements json.Unmarshaler. -var _ json.Unmarshaler = (*prReject)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. 
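Each requirement object in such a list is dispatched on its "type" field by newPolicyRequirementFromJSON below. A sketch of the accepted shapes, with signedBy shown using a hypothetical keyPath (omitting signedIdentity falls back to matchRepoDigestOrExact, as the prSignedBy parser below shows):

var exampleRequirements = []byte(`[
    {"type": "reject"},
    {"type": "signedBy", "keyType": "GPGKeys", "keyPath": "/etc/pki/owner.gpg"}
]`)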
-func (pr *prReject) UnmarshalJSON(data []byte) error {
- *pr = prReject{}
- var tmp prReject
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
- "type": &tmp.Type,
- }); err != nil {
- return err
- }
-
- if tmp.Type != prTypeReject {
- return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
- }
- *pr = *newPRReject()
- return nil
-}
-
-// newPRSignedBy returns a new prSignedBy if parameters are valid.
-func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
- if !keyType.IsValid() {
- return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
- }
- if len(keyPath) > 0 && len(keyData) > 0 {
- return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
- }
- if signedIdentity == nil {
- return nil, InvalidPolicyFormatError("signedIdentity not specified")
- }
- return &prSignedBy{
- prCommon: prCommon{Type: prTypeSignedBy},
- KeyType: keyType,
- KeyPath: keyPath,
- KeyData: keyData,
- SignedIdentity: signedIdentity,
- }, nil
-}
-
-// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
-func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
- return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
-}
-
-// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
-func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
- return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
-}
-
-// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
-func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
- return newPRSignedBy(keyType, "", keyData, signedIdentity)
-}
-
-// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData
-func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
- return newPRSignedByKeyData(keyType, keyData, signedIdentity)
-}
-
-// Compile-time check that prSignedBy implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prSignedBy)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (pr *prSignedBy) UnmarshalJSON(data []byte) error { - *pr = prSignedBy{} - var tmp prSignedBy - var gotKeyPath, gotKeyData = false, false - var signedIdentity json.RawMessage - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - switch key { - case "type": - return &tmp.Type - case "keyType": - return &tmp.KeyType - case "keyPath": - gotKeyPath = true - return &tmp.KeyPath - case "keyData": - gotKeyData = true - return &tmp.KeyData - case "signedIdentity": - return &signedIdentity - default: - return nil - } - }); err != nil { - return err - } - - if tmp.Type != prTypeSignedBy { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - if signedIdentity == nil { - tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() - } else { - si, err := newPolicyReferenceMatchFromJSON(signedIdentity) - if err != nil { - return err - } - tmp.SignedIdentity = si - } - - var res *prSignedBy - var err error - switch { - case gotKeyPath && gotKeyData: - return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously") - case gotKeyPath && !gotKeyData: - res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity) - case !gotKeyPath && gotKeyData: - res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity) - case !gotKeyPath && !gotKeyData: - return InvalidPolicyFormatError("At least one of keyPath and keyData mus be specified") - default: // Coverage: This should never happen - return errors.Errorf("Impossible keyPath/keyData presence combination!?") - } - if err != nil { - return err - } - *pr = *res - - return nil -} - -// IsValid returns true iff kt is a recognized value -func (kt sbKeyType) IsValid() bool { - switch kt { - case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys, - SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: - return true - default: - return false - } -} - -// Compile-time check that sbKeyType implements json.Unmarshaler. -var _ json.Unmarshaler = (*sbKeyType)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (kt *sbKeyType) UnmarshalJSON(data []byte) error { - *kt = sbKeyType("") - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if !sbKeyType(s).IsValid() { - return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s)) - } - *kt = sbKeyType(s) - return nil -} - -// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type. -func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) { - if baseLayerIdentity == nil { - return nil, InvalidPolicyFormatError("baseLayerIdentity not specified") - } - return &prSignedBaseLayer{ - prCommon: prCommon{Type: prTypeSignedBaseLayer}, - BaseLayerIdentity: baseLayerIdentity, - }, nil -} - -// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement. -func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) { - return newPRSignedBaseLayer(baseLayerIdentity) -} - -// Compile-time check that prSignedBaseLayer implements json.Unmarshaler. -var _ json.Unmarshaler = (*prSignedBaseLayer)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. 
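The same requirement can be built programmatically from the constructors above rather than parsed from JSON. A sketch with a hypothetical key path, using NewPRMMatchRepoDigestOrExact (defined just below), the same default UnmarshalJSON applies when signedIdentity is omitted:

func signedByPolicy() (*signature.Policy, error) {
	req, err := signature.NewPRSignedByKeyPath(
		signature.SBKeyTypeGPGKeys,
		"/etc/pki/containers/owner.gpg", // hypothetical key path
		signature.NewPRMMatchRepoDigestOrExact(),
	)
	if err != nil {
		return nil, err
	}
	// Require this key for everything by default.
	return &signature.Policy{
		Default: signature.PolicyRequirements{req},
	}, nil
}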
-func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { - *pr = prSignedBaseLayer{} - var tmp prSignedBaseLayer - var baseLayerIdentity json.RawMessage - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - "baseLayerIdentity": &baseLayerIdentity, - }); err != nil { - return err - } - - if tmp.Type != prTypeSignedBaseLayer { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity) - if err != nil { - return err - } - res, err := newPRSignedBaseLayer(bli) - if err != nil { - // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid. - return err - } - *pr = *res - return nil -} - -// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation. -func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) { - var typeField prmCommon - if err := json.Unmarshal(data, &typeField); err != nil { - return nil, err - } - var res PolicyReferenceMatch - switch typeField.Type { - case prmTypeMatchExact: - res = &prmMatchExact{} - case prmTypeMatchRepoDigestOrExact: - res = &prmMatchRepoDigestOrExact{} - case prmTypeMatchRepository: - res = &prmMatchRepository{} - case prmTypeExactReference: - res = &prmExactReference{} - case prmTypeExactRepository: - res = &prmExactRepository{} - default: - return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type)) - } - if err := json.Unmarshal(data, &res); err != nil { - return nil, err - } - return res, nil -} - -// newPRMMatchExact is NewPRMMatchExact, except it resturns the private type. -func newPRMMatchExact() *prmMatchExact { - return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}} -} - -// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch. -func NewPRMMatchExact() PolicyReferenceMatch { - return newPRMMatchExact() -} - -// Compile-time check that prmMatchExact implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmMatchExact)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (prm *prmMatchExact) UnmarshalJSON(data []byte) error { - *prm = prmMatchExact{} - var tmp prmMatchExact - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prmTypeMatchExact { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *prm = *newPRMMatchExact() - return nil -} - -// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it resturns the private type. -func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact { - return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}} -} - -// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch. -func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch { - return newPRMMatchRepoDigestOrExact() -} - -// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error { - *prm = prmMatchRepoDigestOrExact{} - var tmp prmMatchRepoDigestOrExact - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prmTypeMatchRepoDigestOrExact { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *prm = *newPRMMatchRepoDigestOrExact() - return nil -} - -// newPRMMatchRepository is NewPRMMatchRepository, except it resturns the private type. -func newPRMMatchRepository() *prmMatchRepository { - return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}} -} - -// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch. -func NewPRMMatchRepository() PolicyReferenceMatch { - return newPRMMatchRepository() -} - -// Compile-time check that prmMatchRepository implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmMatchRepository)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { - *prm = prmMatchRepository{} - var tmp prmMatchRepository - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prmTypeMatchRepository { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *prm = *newPRMMatchRepository() - return nil -} - -// newPRMExactReference is NewPRMExactReference, except it resturns the private type. -func newPRMExactReference(dockerReference string) (*prmExactReference, error) { - ref, err := reference.ParseNormalizedNamed(dockerReference) - if err != nil { - return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error())) - } - if reference.IsNameOnly(ref) { - return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference)) - } - return &prmExactReference{ - prmCommon: prmCommon{Type: prmTypeExactReference}, - DockerReference: dockerReference, - }, nil -} - -// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch. -func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) { - return newPRMExactReference(dockerReference) -} - -// Compile-time check that prmExactReference implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmExactReference)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (prm *prmExactReference) UnmarshalJSON(data []byte) error { - *prm = prmExactReference{} - var tmp prmExactReference - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - "dockerReference": &tmp.DockerReference, - }); err != nil { - return err - } - - if tmp.Type != prmTypeExactReference { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - - res, err := newPRMExactReference(tmp.DockerReference) - if err != nil { - return err - } - *prm = *res - return nil -} - -// newPRMExactRepository is NewPRMExactRepository, except it resturns the private type. 
-func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
- if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
- return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
- }
- return &prmExactRepository{
- prmCommon: prmCommon{Type: prmTypeExactRepository},
- DockerRepository: dockerRepository,
- }, nil
-}
-
-// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
-func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
- return newPRMExactRepository(dockerRepository)
-}
-
-// Compile-time check that prmExactRepository implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prmExactRepository)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
- *prm = prmExactRepository{}
- var tmp prmExactRepository
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
- "type": &tmp.Type,
- "dockerRepository": &tmp.DockerRepository,
- }); err != nil {
- return err
- }
-
- if tmp.Type != prmTypeExactRepository {
- return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
- }
-
- res, err := newPRMExactRepository(tmp.DockerRepository)
- if err != nil {
- return err
- }
- *prm = *res
- return nil
-}
diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval.go b/vendor/github.com/containers/image/v4/signature/policy_eval.go
deleted file mode 100644
index 110d40f7c..000000000
--- a/vendor/github.com/containers/image/v4/signature/policy_eval.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// This defines the top-level policy evaluation API.
-// To the extent possible, the interface of the functions provided
-// here is intended to be completely unambiguous, and stable for users
-// to rely on.
-
-package signature
-
-import (
- "context"
-
- "github.com/containers/image/v4/types"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
-type PolicyRequirementError string
-
-func (err PolicyRequirementError) Error() string {
- return string(err)
-}
-
-// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
-type signatureAcceptanceResult string
-
-const (
- sarAccepted signatureAcceptanceResult = "sarAccepted"
- sarRejected signatureAcceptanceResult = "sarRejected"
- sarUnknown signatureAcceptanceResult = "sarUnknown"
-)
-
-// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
-// The type is public, but its definition is private.
-type PolicyRequirement interface {
- // FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
- // costly initialization like creating temporary GPG home directories and reading files.
- // Setup() (someState, error)
- // Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
-
- // isSignatureAuthorAccepted, given an image and a signature blob, returns:
- // - sarAccepted if the signature has been verified against the appropriate public key
- // (where "appropriate public key" may depend on the contents of the signature);
- // in that case a parsed Signature should be returned.
- // - sarRejected if the signature has not been verified;
- // in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
- // succeeded but the result was rejection.
- // - sarUnknown if this PolicyRequirement does not deal with signatures.
- // NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
- // Returning sarUnknown and a non-nil error value is invalid.
- // WARNING: This makes the signature contents acceptable for further processing,
- // but it does not necessarily mean that the contents of the signature are
- // consistent with local policy.
- // For example:
- // - Do not use a true value to determine whether to run
- // a container based on this image; use IsRunningImageAllowed instead.
- // - Just because a signature is accepted does not automatically mean the contents of the
- // signature are authorized to run code as root, or to affect system or cluster configuration.
- isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
-
- // isRunningImageAllowed returns true if the requirement allows running an image.
- // If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
- // succeeded but the result was rejection.
- // WARNING: This validates signatures and the manifest, but does not download or validate the
- // layers. Users must validate that the layers match their expected digests.
- isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error)
-}
-
-// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
-// The type is public, but its implementation is private.
-type PolicyReferenceMatch interface {
- // matchesDockerReference decides whether a specific image identity is accepted for an image
- // (or, usually, for the image's Reference().DockerReference()). Note that
- // image.Reference().DockerReference() may be nil.
- matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
-}
-
-// PolicyContext encapsulates a policy and possible cached state
-// for speeding up its evaluation.
-type PolicyContext struct {
- Policy *Policy
- state policyContextState // Internal consistency checking
-}
-
-// policyContextState is used internally to verify the users are not misusing a PolicyContext.
-type policyContextState string
-
-const (
- pcInvalid policyContextState = ""
- pcInitializing policyContextState = "Initializing"
- pcReady policyContextState = "Ready"
- pcInUse policyContextState = "InUse"
- pcDestroying policyContextState = "Destroying"
- pcDestroyed policyContextState = "Destroyed"
-)
-
-// changeState changes pc.state, or fails if the state is unexpected
-func (pc *PolicyContext) changeState(expected, new policyContextState) error {
- if pc.state != expected {
- return errors.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
- }
- pc.state = new
- return nil
-}
-
-// NewPolicyContext sets up and initializes a context for the specified policy.
-// The policy must not be modified while the context exists. FIXME: make a deep copy?
-// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
-func NewPolicyContext(policy *Policy) (*PolicyContext, error) { - pc := &PolicyContext{Policy: policy, state: pcInitializing} - // FIXME: initialize - if err := pc.changeState(pcInitializing, pcReady); err != nil { - // Huh?! This should never fail, we didn't give the pointer to anybody. - // Just give up and leave unclean state around. - return nil, err - } - return pc, nil -} - -// Destroy should be called when the user of the context is done with it. -func (pc *PolicyContext) Destroy() error { - if err := pc.changeState(pcReady, pcDestroying); err != nil { - return err - } - // FIXME: destroy - return pc.changeState(pcDestroying, pcDestroyed) -} - -// policyIdentityLogName returns a string description of the image identity for policy purposes. -// ONLY use this for log messages, not for any decisions! -func policyIdentityLogName(ref types.ImageReference) string { - return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity() -} - -// requirementsForImageRef selects the appropriate requirements for ref. -func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements { - // Do we have a PolicyTransportScopes for this transport? - transportName := ref.Transport().Name() - if transportScopes, ok := pc.Policy.Transports[transportName]; ok { - // Look for a full match. - identity := ref.PolicyConfigurationIdentity() - if req, ok := transportScopes[identity]; ok { - logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity) - return req - } - - // Look for a match of the possible parent namespaces. - for _, name := range ref.PolicyConfigurationNamespaces() { - if req, ok := transportScopes[name]; ok { - logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name) - return req - } - } - - // Look for a default match for the transport. - if req, ok := transportScopes[""]; ok { - logrus.Debugf(` Using transport "%s" policy section ""`, transportName) - return req - } - } - - logrus.Debugf(" Using default policy section") - return pc.Policy.Default -} - -// GetSignaturesWithAcceptedAuthor returns those signatures from an image -// for which the policy accepts the author (and which have been successfully -// verified). -// NOTE: This may legitimately return an empty list and no error, if the image -// has no signatures or only invalid signatures. -// WARNING: This makes the signature contents acceptable for futher processing, -// but it does not necessarily mean that the contents of the signature are -// consistent with local policy. -// For example: -// - Do not use a an existence of an accepted signature to determine whether to run -// a container based on this image; use IsRunningImageAllowed instead. -// - Just because a signature is accepted does not automatically mean the contents of the -// signature are authorized to run code as root, or to affect system or cluster configuration. 
-func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, image types.UnparsedImage) (sigs []*Signature, finalErr error) { - if err := pc.changeState(pcReady, pcInUse); err != nil { - return nil, err - } - defer func() { - if err := pc.changeState(pcInUse, pcReady); err != nil { - sigs = nil - finalErr = err - } - }() - - logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference())) - reqs := pc.requirementsForImageRef(image.Reference()) - - // FIXME: rename Signatures to UnverifiedSignatures - // FIXME: pass context.Context - unverifiedSignatures, err := image.Signatures(ctx) - if err != nil { - return nil, err - } - - res := make([]*Signature, 0, len(unverifiedSignatures)) - for sigNumber, sig := range unverifiedSignatures { - var acceptedSig *Signature // non-nil if accepted - rejected := false - // FIXME? Say more about the contents of the signature, i.e. parse it even before verification?! - logrus.Debugf("Evaluating signature %d:", sigNumber) - interpretingReqs: - for reqNumber, req := range reqs { - // FIXME: Log the requirement itself? For now, we use just the number. - // FIXME: supply state - switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res { - case sarAccepted: - if as == nil { // Coverage: this should never happen - logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber) - rejected = true - break interpretingReqs - } - logrus.Debugf(" Requirement %d: signature accepted", reqNumber) - if acceptedSig == nil { - acceptedSig = as - } else if *as != *acceptedSig { // Coverage: this should never happen - // Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents? - logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber) - rejected = true - acceptedSig = nil - break interpretingReqs - } - case sarRejected: - logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error()) - rejected = true - break interpretingReqs - case sarUnknown: - if err != nil { // Coverage: this should never happen - logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error()) - rejected = true - break interpretingReqs - } - logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber) - default: // Coverage: this should never happen - logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res)) - rejected = true - break interpretingReqs - } - } - // This also handles the (invalid) case of empty reqs, by rejecting the signature. - if acceptedSig != nil && !rejected { - logrus.Debugf(" Overall: OK, signature accepted") - res = append(res, acceptedSig) - } else { - logrus.Debugf(" Overall: Signature not accepted") - } - } - return res, nil -} - -// IsRunningImageAllowed returns true iff the policy allows running the image. -// If it returns false, err must be non-nil, and should be an PolicyRequirementError if evaluation -// succeeded but the result was rejection. -// WARNING: This validates signatures and the manifest, but does not download or validate the -// layers. Users must validate that the layers match their expected digests. 
-func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (res bool, finalErr error) { - if err := pc.changeState(pcReady, pcInUse); err != nil { - return false, err - } - defer func() { - if err := pc.changeState(pcInUse, pcReady); err != nil { - res = false - finalErr = err - } - }() - - logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) - reqs := pc.requirementsForImageRef(image.Reference()) - - if len(reqs) == 0 { - return false, PolicyRequirementError("List of verification policy requirements must not be empty") - } - - for reqNumber, req := range reqs { - // FIXME: supply state - allowed, err := req.isRunningImageAllowed(ctx, image) - if !allowed { - logrus.Debugf("Requirement %d: denied, done", reqNumber) - return false, err - } - logrus.Debugf(" Requirement %d: allowed", reqNumber) - } - // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image. - logrus.Debugf("Overall: allowed") - return true, nil -} diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go deleted file mode 100644 index 18fb651d1..000000000 --- a/vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go +++ /dev/null @@ -1,20 +0,0 @@ -// Policy evaluation for prSignedBaseLayer. - -package signature - -import ( - "context" - - "github.com/containers/image/v4/types" - "github.com/sirupsen/logrus" -) - -func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - return sarUnknown, nil, nil -} - -func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { - // FIXME? Reject this at policy parsing time already? - logrus.Errorf("signedBaseLayer not implemented yet!") - return false, PolicyRequirementError("signedBaseLayer not implemented yet!") -} diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go deleted file mode 100644 index b8188da5e..000000000 --- a/vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go +++ /dev/null @@ -1,131 +0,0 @@ -// Policy evaluation for prSignedBy. - -package signature - -import ( - "context" - "fmt" - "io/ioutil" - "strings" - - "github.com/pkg/errors" - - "github.com/containers/image/v4/manifest" - "github.com/containers/image/v4/types" - "github.com/opencontainers/go-digest" -) - -func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - switch pr.KeyType { - case SBKeyTypeGPGKeys: - case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: - // FIXME? Reject this at policy parsing time already? 
diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go
deleted file mode 100644
index 18fb651d1..000000000
--- a/vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Policy evaluation for prSignedBaseLayer.
-
-package signature
-
-import (
-	"context"
-
-	"github.com/containers/image/v4/types"
-	"github.com/sirupsen/logrus"
-)
-
-func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
-	return sarUnknown, nil, nil
-}
-
-func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
-	// FIXME? Reject this at policy parsing time already?
-	logrus.Errorf("signedBaseLayer not implemented yet!")
-	return false, PolicyRequirementError("signedBaseLayer not implemented yet!")
-}
diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go
deleted file mode 100644
index b8188da5e..000000000
--- a/vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Policy evaluation for prSignedBy.
-
-package signature
-
-import (
-	"context"
-	"fmt"
-	"io/ioutil"
-	"strings"
-
-	"github.com/pkg/errors"
-
-	"github.com/containers/image/v4/manifest"
-	"github.com/containers/image/v4/types"
-	"github.com/opencontainers/go-digest"
-)
-
-func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
-	switch pr.KeyType {
-	case SBKeyTypeGPGKeys:
-	case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
-		// FIXME? Reject this at policy parsing time already?
-		return sarRejected, nil, errors.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
-	default:
-		// This should never happen, newPRSignedBy ensures KeyType.IsValid()
-		return sarRejected, nil, errors.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
-	}
-
-	if pr.KeyPath != "" && pr.KeyData != nil {
-		return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
-	}
-	// FIXME: move this to per-context initialization
-	var data []byte
-	if pr.KeyData != nil {
-		data = pr.KeyData
-	} else {
-		d, err := ioutil.ReadFile(pr.KeyPath)
-		if err != nil {
-			return sarRejected, nil, err
-		}
-		data = d
-	}
-
-	// FIXME: move this to per-context initialization
-	mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
-	if err != nil {
-		return sarRejected, nil, err
-	}
-	defer mech.Close()
-	if len(trustedIdentities) == 0 {
-		return sarRejected, nil, PolicyRequirementError("No public keys imported")
-	}
-
-	signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
-		validateKeyIdentity: func(keyIdentity string) error {
-			for _, trustedIdentity := range trustedIdentities {
-				if keyIdentity == trustedIdentity {
-					return nil
-				}
-			}
-			// Coverage: We use a private GPG home directory and only import trusted keys, so this should
-			// not be reachable.
-			return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
-		},
-		validateSignedDockerReference: func(ref string) error {
-			if !pr.SignedIdentity.matchesDockerReference(image, ref) {
-				return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
-			}
-			return nil
-		},
-		validateSignedDockerManifestDigest: func(digest digest.Digest) error {
-			m, _, err := image.Manifest(ctx)
-			if err != nil {
-				return err
-			}
-			digestMatches, err := manifest.MatchesDigest(m, digest)
-			if err != nil {
-				return err
-			}
-			if !digestMatches {
-				return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
-			}
-			return nil
-		},
-	})
-	if err != nil {
-		return sarRejected, nil, err
-	}
-
-	return sarAccepted, signature, nil
-}
-
-func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
-	sigs, err := image.Signatures(ctx)
-	if err != nil {
-		return false, err
-	}
-	var rejections []error
-	for _, s := range sigs {
-		var reason error
-		switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res {
-		case sarAccepted:
-			// One accepted signature is enough.
-			return true, nil
-		case sarRejected:
-			reason = err
-		case sarUnknown:
-			// Huh?! This should not happen at all; treat it as any other invalid value.
-			fallthrough
-		default:
-			reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
-		}
-		rejections = append(rejections, reason)
-	}
-	var summary error
-	switch len(rejections) {
-	case 0:
-		summary = PolicyRequirementError("A signature was required, but no signature exists")
-	case 1:
-		summary = rejections[0]
-	default:
-		var msgs []string
-		for _, e := range rejections {
-			msgs = append(msgs, e.Error())
-		}
-		summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
-			strings.Join(msgs, "; ")))
-	}
-	return false, summary
-}
diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v4/signature/policy_eval_simple.go
deleted file mode 100644
index 7fbcf4a94..000000000
--- a/vendor/github.com/containers/image/v4/signature/policy_eval_simple.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Policy evaluation for the various simple PolicyRequirement types.
-
-package signature
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/containers/image/v4/transports"
-	"github.com/containers/image/v4/types"
-)
-
-func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
-	// prInsecureAcceptAnything semantics: Every image is allowed to run,
-	// but this does not consider the signature as verified.
-	return sarUnknown, nil, nil
-}
-
-func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
-	return true, nil
-}
-
-func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
-	return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
-}
-
-func (pr *prReject) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
-	return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
-}
diff --git a/vendor/github.com/containers/image/v4/signature/policy_reference_match.go b/vendor/github.com/containers/image/v4/signature/policy_reference_match.go
deleted file mode 100644
index 016d737fb..000000000
--- a/vendor/github.com/containers/image/v4/signature/policy_reference_match.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// PolicyReferenceMatch implementations.
-
-package signature
-
-import (
-	"fmt"
-
-	"github.com/containers/image/v4/docker/reference"
-	"github.com/containers/image/v4/transports"
-	"github.com/containers/image/v4/types"
-)
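(Aside, not part of the patch: the requirement types evaluated above correspond directly to entries in containers-policy.json. A sketch of a policy that rejects everything except GPG-signed images from one registry; the registry name and key path are invented for illustration:)

    policyJSON := []byte(`{
    	"default": [{"type": "reject"}],
    	"transports": {
    		"docker": {
    			"registry.example.com": [
    				{"type": "signedBy", "keyType": "GPGKeys", "keyPath": "/etc/pki/example.gpg"}
    			]
    		}
    	}
    }`)
    policy, err := signature.NewPolicyFromBytes(policyJSON)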
-// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
-func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
-	r1 := image.Reference().DockerReference()
-	if r1 == nil {
-		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
-			transports.ImageName(image.Reference())))
-	}
-	r2, err := reference.ParseNormalizedNamed(s2)
-	if err != nil {
-		return nil, nil, err
-	}
-	return r1, r2, nil
-}
-
-func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
-	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
-		return false
-	}
-	return signature.String() == intended.String()
-}
-
-func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-
-	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
-	if reference.IsNameOnly(signature) {
-		return false
-	}
-	switch intended.(type) {
-	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
-		return signature.String() == intended.String()
-	case reference.Canonical:
-		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy and in UnparsedImage.Manifest.
-		// Because UnparsedImage.Manifest verifies intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
-		// we know that the signature digest matches intended.Digest() (but intended.Digest() and the signature digest may use different algorithms).
-		return signature.Name() == intended.Name()
-	default: // !reference.IsNameOnly(intended)
-		return false
-	}
-}
-
-func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	return signature.Name() == intended.Name()
-}
-
-// parseDockerReferences converts two reference strings into parsed entities, failing on any error
-func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
-	r1, err := reference.ParseNormalizedNamed(s1)
-	if err != nil {
-		return nil, nil, err
-	}
-	r2, err := reference.ParseNormalizedNamed(s2)
-	if err != nil {
-		return nil, nil, err
-	}
-	return r1, r2, nil
-}
-
-func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
-	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
-		return false
-	}
-	return signature.String() == intended.String()
-}
-
-func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	return signature.Name() == intended.Name()
-}
diff --git a/vendor/github.com/containers/image/v4/signature/policy_types.go b/vendor/github.com/containers/image/v4/signature/policy_types.go
deleted file mode 100644
index d3b33bb7a..000000000
--- a/vendor/github.com/containers/image/v4/signature/policy_types.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Note: Consider the API unstable until the code supports at least three different image formats or transports.
-
-// This defines types used to represent a signature verification policy in memory.
-// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
-// built using the constructor functions provided in policy_config.go.
-
-package signature
-
-// NOTE: Keep this in sync with docs/containers-policy.json.5.md!
-
-// Policy defines requirements for considering a signature, or an image, valid.
-type Policy struct {
-	// Default applies to any image which does not have a matching policy in Transports.
-	// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
-	// if the image matches none of the scopes.
-	Default    PolicyRequirements               `json:"default"`
-	Transports map[string]PolicyTransportScopes `json:"transports"`
-}
-
-// PolicyTransportScopes defines policies for images for a specific transport,
-// for various scopes, the map keys.
-// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
-// there is one scope precisely matching a single image, and namespace scopes as prefixes
-// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
-// The empty scope, if it exists, is considered a parent namespace of all other scopes.
-// The most specific scope wins; duplication is prohibited (hard failure).
-type PolicyTransportScopes map[string]PolicyRequirements
-
-// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
-// Must not be empty, frequently will only contain a single element.
-type PolicyRequirements []PolicyRequirement
-
-// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
-// The type is public, but its definition is private.
-
-// prCommon is the common type field in a JSON encoding of PolicyRequirement.
-type prCommon struct {
-	Type prTypeIdentifier `json:"type"`
-}
-
-// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
-type prTypeIdentifier string
-
-const (
-	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
-	prTypeReject                 prTypeIdentifier = "reject"
-	prTypeSignedBy               prTypeIdentifier = "signedBy"
-	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
-)
-
-// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
-// every image is allowed to run.
-// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
-// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
-// FIXME? Better name?
-type prInsecureAcceptAnything struct {
-	prCommon
-}
-
-// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
-type prReject struct {
-	prCommon
-}
-
-// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
-type prSignedBy struct {
-	prCommon
-
-	// KeyType specifies what kind of key reference KeyPath/KeyData is.
-	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X509Certificates” | “signedByX509CAs”.
-	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
-	KeyType sbKeyType `json:"keyType"`
-
-	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
-	KeyPath string `json:"keyPath,omitempty"`
-	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
-	KeyData []byte `json:"keyData,omitempty"`
-
-	// SignedIdentity specifies what image identity the signature must be claiming about the image.
-	// Defaults to "match-exact" if not specified.
-	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
-}
-
-// sbKeyType are the allowed values for prSignedBy.KeyType
-type sbKeyType string
-
-const (
-	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
-	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
-	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
-	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
-	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
-	// FIXME: PEM, DER?
-	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
-	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
-	// FIXME: PEM, DER?
-	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
-)
-
-// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
-type prSignedBaseLayer struct {
-	prCommon
-	// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
-	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
-}
-
-// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
-// The type is public, but its implementation is private.
-
-// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
-type prmCommon struct {
-	Type prmTypeIdentifier `json:"type"`
-}
-
-// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
-type prmTypeIdentifier string
-
-const (
-	prmTypeMatchExact             prmTypeIdentifier = "matchExact"
-	prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
-	prmTypeMatchRepository        prmTypeIdentifier = "matchRepository"
-	prmTypeExactReference         prmTypeIdentifier = "exactReference"
-	prmTypeExactRepository        prmTypeIdentifier = "exactRepository"
-)
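(Aside, not part of the patch: as the file comment says, these private types are built via the constructors in policy_config.go rather than instantiated directly. A sketch of the programmatic equivalent of a signedBy requirement, with an invented key path:)

    prm := signature.NewPRMMatchRepoDigestOrExact()
    pr, err := signature.NewPRSignedByKeyPath(signature.SBKeyTypeGPGKeys, "/etc/pki/example.gpg", prm)
    if err != nil {
    	return err
    }
    policy := &signature.Policy{Default: signature.PolicyRequirements{pr}}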
-// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
-type prmMatchExact struct {
-	prmCommon
-}
-
-// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
-// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest.
-type prmMatchRepoDigestOrExact struct {
-	prmCommon
-}
-
-// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, and may differ in the tag.
-type prmMatchRepository struct {
-	prmCommon
-}
-
-// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
-type prmExactReference struct {
-	prmCommon
-	DockerReference string `json:"dockerReference"`
-}
-
-// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
-type prmExactRepository struct {
-	prmCommon
-	DockerRepository string `json:"dockerRepository"`
-}
diff --git a/vendor/github.com/containers/image/v4/signature/signature.go b/vendor/github.com/containers/image/v4/signature/signature.go
deleted file mode 100644
index 09c4de0b3..000000000
--- a/vendor/github.com/containers/image/v4/signature/signature.go
+++ /dev/null
@@ -1,280 +0,0 @@
-// Note: Consider the API unstable until the code supports at least three different image formats or transports.
-
-// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
-
-package signature
-
-import (
-	"encoding/json"
-	"fmt"
-	"time"
-
-	"github.com/pkg/errors"
-
-	"github.com/containers/image/v4/version"
-	"github.com/opencontainers/go-digest"
-)
-
-const (
-	signatureType = "atomic container signature"
-)
-
-// InvalidSignatureError is returned when parsing an invalid signature.
-type InvalidSignatureError struct {
-	msg string
-}
-
-func (err InvalidSignatureError) Error() string {
-	return err.msg
-}
-
-// Signature is the parsed content of a signature.
-// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
-type Signature struct {
-	DockerManifestDigest digest.Digest
-	DockerReference      string // FIXME: more precise type?
-}
-
-// untrustedSignature is the parsed content of a signature.
-type untrustedSignature struct {
-	UntrustedDockerManifestDigest digest.Digest
-	UntrustedDockerReference      string // FIXME: more precise type?
-	UntrustedCreatorID            *string
-	// This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision,
-	// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
-	// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
-	// we would add another field, UntrustedTimestampNS int64.
-	UntrustedTimestamp *int64
-}
-
-// UntrustedSignatureInformation is information available in an untrusted signature.
-// This may be useful when debugging signature verification failures,
-// or when managing a set of signatures on a single image.
-//
-// WARNING: Do not use the contents of this for ANY security decisions,
-// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
-// There is NO REASON to expect the values to be correct, or not intentionally misleading -// (including things like “✅ Verified by $authority”) -type UntrustedSignatureInformation struct { - UntrustedDockerManifestDigest digest.Digest - UntrustedDockerReference string // FIXME: more precise type? - UntrustedCreatorID *string - UntrustedTimestamp *time.Time - UntrustedShortKeyIdentifier string -} - -// newUntrustedSignature returns an untrustedSignature object with -// the specified primary contents and appropriate metadata. -func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature { - // Use intermediate variables for these values so that we can take their addresses. - // Golang guarantees that they will have a new address on every execution. - creatorID := "atomic " + version.Version - timestamp := time.Now().Unix() - return untrustedSignature{ - UntrustedDockerManifestDigest: dockerManifestDigest, - UntrustedDockerReference: dockerReference, - UntrustedCreatorID: &creatorID, - UntrustedTimestamp: ×tamp, - } -} - -// Compile-time check that untrustedSignature implements json.Marshaler -var _ json.Marshaler = (*untrustedSignature)(nil) - -// MarshalJSON implements the json.Marshaler interface. -func (s untrustedSignature) MarshalJSON() ([]byte, error) { - if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" { - return nil, errors.New("Unexpected empty signature content") - } - critical := map[string]interface{}{ - "type": signatureType, - "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()}, - "identity": map[string]string{"docker-reference": s.UntrustedDockerReference}, - } - optional := map[string]interface{}{} - if s.UntrustedCreatorID != nil { - optional["creator"] = *s.UntrustedCreatorID - } - if s.UntrustedTimestamp != nil { - optional["timestamp"] = *s.UntrustedTimestamp - } - signature := map[string]interface{}{ - "critical": critical, - "optional": optional, - } - return json.Marshal(signature) -} - -// Compile-time check that untrustedSignature implements json.Unmarshaler -var _ json.Unmarshaler = (*untrustedSignature)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface -func (s *untrustedSignature) UnmarshalJSON(data []byte) error { - err := s.strictUnmarshalJSON(data) - if err != nil { - if _, ok := err.(jsonFormatError); ok { - err = InvalidSignatureError{msg: err.Error()} - } - } - return err -} - -// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type. -// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller. 
-func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
-	var critical, optional json.RawMessage
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"critical": &critical,
-		"optional": &optional,
-	}); err != nil {
-		return err
-	}
-
-	var creatorID string
-	var timestamp float64
-	var gotCreatorID, gotTimestamp = false, false
-	if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
-		switch key {
-		case "creator":
-			gotCreatorID = true
-			return &creatorID
-		case "timestamp":
-			gotTimestamp = true
-			return &timestamp
-		default:
-			var ignore interface{}
-			return &ignore
-		}
-	}); err != nil {
-		return err
-	}
-	if gotCreatorID {
-		s.UntrustedCreatorID = &creatorID
-	}
-	if gotTimestamp {
-		intTimestamp := int64(timestamp)
-		if float64(intTimestamp) != timestamp {
-			return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"}
-		}
-		s.UntrustedTimestamp = &intTimestamp
-	}
-
-	var t string
-	var image, identity json.RawMessage
-	if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
-		"type":     &t,
-		"image":    &image,
-		"identity": &identity,
-	}); err != nil {
-		return err
-	}
-	if t != signatureType {
-		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
-	}
-
-	var digestString string
-	if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
-		"docker-manifest-digest": &digestString,
-	}); err != nil {
-		return err
-	}
-	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
-
-	return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
-		"docker-reference": &s.UntrustedDockerReference,
-	})
-}
-
-// sign formats the signature and returns a blob signed using mech and keyIdentity
-// (If it seems surprising that this is a method on untrustedSignature, note that there
-// isn’t a good reason to think that a key used by the user is trusted by any component
-// of the system just because it is a private key — actually the presence of a private key
-// on the system increases the likelihood of a successful attack on that private key
-// on that particular system.)
-func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
-	json, err := json.Marshal(s)
-	if err != nil {
-		return nil, err
-	}
-
-	return mech.Sign(json, keyIdentity)
-}
-
-// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
-// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
-// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
-// because the functions have the same or similar types, so there is a risk of exchanging the functions;
-// named members of this struct are more explicit.
-type signatureAcceptanceRules struct {
-	validateKeyIdentity                func(string) error
-	validateSignedDockerReference      func(string) error
-	validateSignedDockerManifestDigest func(digest.Digest) error
-}
-
-// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
-// match expected values, both as specified by rules, and returns it.
-func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
-	signed, keyIdentity, err := mech.Verify(unverifiedSignature)
-	if err != nil {
-		return nil, err
-	}
-	if err := rules.validateKeyIdentity(keyIdentity); err != nil {
-		return nil, err
-	}
-
-	var unmatchedSignature untrustedSignature
-	if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
-		return nil, InvalidSignatureError{msg: err.Error()}
-	}
-	if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
-		return nil, err
-	}
-	if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
-		return nil, err
-	}
-	// signatureAcceptanceRules have accepted this value.
-	return &Signature{
-		DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
-		DockerReference:      unmatchedSignature.UntrustedDockerReference,
-	}, nil
-}
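(Aside, not part of the patch: a sketch of how a caller might use GetUntrustedSignatureInformationWithoutVerifying, defined next, to display signature metadata while debugging; sigBlob is assumed to hold a raw signature, and every field remains untrusted, per the WARNING below:)

    info, err := signature.GetUntrustedSignatureInformationWithoutVerifying(sigBlob)
    if err != nil {
    	return err
    }
    if info.UntrustedTimestamp != nil {
    	fmt.Printf("claims creation at %s by key %s\n",
    		info.UntrustedTimestamp, info.UntrustedShortKeyIdentifier)
    }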
-
-// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
-// WITHOUT doing any cryptographic verification.
-// This may be useful when debugging signature verification failures,
-// or when managing a set of signatures on a single image.
-//
-// WARNING: Do not use the contents of this for ANY security decisions,
-// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
-// There is NO REASON to expect the values to be correct, or not intentionally misleading
-// (including things like “✅ Verified by $authority”)
-func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
-	// NOTE: This should eventually do format autodetection.
-	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
-	if err != nil {
-		return nil, err
-	}
-	defer mech.Close()
-
-	untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
-	if err != nil {
-		return nil, err
-	}
-	var untrustedDecodedContents untrustedSignature
-	if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
-		return nil, InvalidSignatureError{msg: err.Error()}
-	}
-
-	var timestamp *time.Time // = nil
-	if untrustedDecodedContents.UntrustedTimestamp != nil {
-		ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
-		timestamp = &ts
-	}
-	return &UntrustedSignatureInformation{
-		UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
-		UntrustedDockerReference:      untrustedDecodedContents.UntrustedDockerReference,
-		UntrustedCreatorID:            untrustedDecodedContents.UntrustedCreatorID,
-		UntrustedTimestamp:            timestamp,
-		UntrustedShortKeyIdentifier:   shortKeyIdentifier,
-	}, nil
-}
diff --git a/vendor/github.com/containers/image/v4/storage/storage_image.go b/vendor/github.com/containers/image/v4/storage/storage_image.go
deleted file mode 100644
index 4e913b84c..000000000
--- a/vendor/github.com/containers/image/v4/storage/storage_image.go
+++ /dev/null
@@ -1,956 +0,0 @@
-// +build !containers_image_storage_stub
-
-package storage
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"sync"
-	"sync/atomic"
-
-	"github.com/containers/image/v4/docker/reference"
-	"github.com/containers/image/v4/image"
-	"github.com/containers/image/v4/internal/tmpdir"
-	"github.com/containers/image/v4/manifest"
-	"github.com/containers/image/v4/pkg/blobinfocache/none"
-	"github.com/containers/image/v4/types"
-	"github.com/containers/storage"
-	"github.com/containers/storage/pkg/archive"
-	"github.com/containers/storage/pkg/ioutils"
-	digest "github.com/opencontainers/go-digest"
-	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-var (
-	// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
-	// with a digest-based name that doesn't match its contents.
-	ErrBlobDigestMismatch = errors.New("blob digest mismatch")
-	// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
-	// with an expected size that doesn't match the reader.
-	ErrBlobSizeMismatch = errors.New("blob size mismatch")
-	// ErrNoManifestLists is returned when GetManifest() is called
-	// with a non-nil instanceDigest.
-	ErrNoManifestLists = errors.New("manifest lists are not supported by this transport")
-	// ErrNoSuchImage is returned when we attempt to access an image which
-	// doesn't exist in the storage area.
-	ErrNoSuchImage = storage.ErrNotAnImage
-)
-
-type storageImageSource struct {
-	imageRef       storageReference
-	image          *storage.Image
-	layerPosition  map[digest.Digest]int // Where we are in reading a blob's layers
-	cachedManifest []byte                // A cached copy of the manifest, if already known, or nil
-	getBlobMutex   sync.Mutex            // Mutex to sync state for parallel GetBlob executions
-	SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
-}
-
-type storageImageDestination struct {
-	imageRef       storageReference
-	directory      string                          // Temporary directory where we store blobs until Commit() time
-	nextTempFileID int32                           // A counter that we use for computing filenames to assign to blobs
-	manifest       []byte                          // Manifest contents, temporary
-	signatures     []byte                          // Signature contents, temporary
-	putBlobMutex   sync.Mutex                      // Mutex to sync state for parallel PutBlob executions
-	blobDiffIDs    map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
-	fileSizes      map[digest.Digest]int64         // Mapping from layer blobsums to their sizes
-	filenames      map[digest.Digest]string        // Mapping from layer blobsums to names of files we used to hold them
-	SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
-}
-
-type storageImageCloser struct {
-	types.ImageCloser
-	size int64
-}
-
-// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
-// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
-// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
-func manifestBigDataKey(digest digest.Digest) string {
-	return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
-}
-
-// newImageSource sets up an image for reading.
-func newImageSource(imageRef storageReference) (*storageImageSource, error) {
-	// First, locate the image.
-	img, err := imageRef.resolveImage()
-	if err != nil {
-		return nil, err
-	}
-
-	// Build the reader object.
-	image := &storageImageSource{
-		imageRef:       imageRef,
-		image:          img,
-		layerPosition:  make(map[digest.Digest]int),
-		SignatureSizes: []int{},
-	}
-	if img.Metadata != "" {
-		if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
-			return nil, errors.Wrap(err, "error decoding metadata for source image")
-		}
-	}
-	return image, nil
-}
-
-// Reference returns the image reference that we used to find this image.
-func (s *storageImageSource) Reference() types.ImageReference {
-	return s.imageRef
-}
-
-// Close cleans up any resources we tied up while reading the image.
-func (s *storageImageSource) Close() error {
-	return nil
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *storageImageSource) HasThreadSafeGetBlob() bool {
-	return true
-}
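(Aside, not part of the patch: the two-key convention documented at manifestBigDataKey above, sketched from the reader's side; store, imageID, and dgst are assumed to be in scope:)

    // Prefer the digest-specific key ("manifest-" + digest), then fall back to
    // the legacy storage.ImageDigestBigDataKey for images written by older code.
    blob, err := store.ImageBigData(imageID, manifestBigDataKey(dgst))
    if err != nil && os.IsNotExist(err) {
    	blob, err = store.ImageBigData(imageID, storage.ImageDigestBigDataKey)
    }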
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
-	if info.Digest == image.GzippedEmptyLayerDigest {
-		return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
-	}
-	rc, n, _, err = s.getBlobAndLayerID(info)
-	return rc, n, err
-}
-
-// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
-func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
-	var layer storage.Layer
-	var diffOptions *storage.DiffOptions
-	// We need a valid digest value.
-	err = info.Digest.Validate()
-	if err != nil {
-		return nil, -1, "", err
-	}
-	// Check if the blob corresponds to a diff that was used to initialize any layers. Our
-	// callers should try to retrieve layers using their uncompressed digests, so no need to
-	// check if they're using one of the compressed digests, which we can't reproduce anyway.
-	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
-	// If it's not a layer, then it must be a data item.
-	if len(layers) == 0 {
-		b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
-		if err != nil {
-			return nil, -1, "", err
-		}
-		r := bytes.NewReader(b)
-		logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
-		return ioutil.NopCloser(r), int64(r.Len()), "", nil
-	}
-	// Step through the list of matching layers. Tests may want to verify that if we have multiple layers
-	// which claim to have the same contents, we actually do have multiple layers; otherwise we could
-	// just go ahead and use the first one every time.
-	s.getBlobMutex.Lock()
-	i := s.layerPosition[info.Digest]
-	s.layerPosition[info.Digest] = i + 1
-	s.getBlobMutex.Unlock()
-	if len(layers) > 0 {
-		layer = layers[i%len(layers)]
-	}
-	// Force the storage layer to not try to match any compression that was used when the layer was first
-	// handed to it.
-	noCompression := archive.Uncompressed
-	diffOptions = &storage.DiffOptions{
-		Compression: &noCompression,
-	}
-	if layer.UncompressedSize < 0 {
-		n = -1
-	} else {
-		n = layer.UncompressedSize
-	}
-	logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
-	rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
-	if err != nil {
-		return nil, -1, "", err
-	}
-	return rc, n, layer.ID, err
-}
-
-// GetManifest() reads the image's manifest.
-func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
-	if instanceDigest != nil {
-		return nil, "", ErrNoManifestLists
-	}
-	if len(s.cachedManifest) == 0 {
-		// The manifest is stored as a big data item.
-		// Prefer the manifest corresponding to the user-specified digest, if available.
-		if s.imageRef.named != nil {
-			if digested, ok := s.imageRef.named.(reference.Digested); ok {
-				key := manifestBigDataKey(digested.Digest())
-				blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
-				if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
-					return nil, "", err
-				}
-				if err == nil {
-					s.cachedManifest = blob
-				}
-			}
-		}
-		// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
- // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). - if len(s.cachedManifest) == 0 { - cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) - if err != nil { - return nil, "", err - } - s.cachedManifest = cachedBlob - } - } - return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err -} - -// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of -// the image, after they've been decompressed. -func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - manifestBlob, manifestType, err := s.GetManifest(ctx, nil) - if err != nil { - return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID) - } - man, err := manifest.FromBlob(manifestBlob, manifestType) - if err != nil { - return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID) - } - - uncompressedLayerType := "" - switch manifestType { - case imgspecv1.MediaTypeImageManifest: - uncompressedLayerType = imgspecv1.MediaTypeImageLayer - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: - // This is actually a compressed type, but there's no uncompressed type defined - uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType - } - - physicalBlobInfos := []types.BlobInfo{} - layerID := s.image.TopLayer - for layerID != "" { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return nil, errors.Wrapf(err, "error reading layer %q in image %q", layerID, s.image.ID) - } - if layer.UncompressedDigest == "" { - return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID) - } - if layer.UncompressedSize < 0 { - return nil, errors.Errorf("uncompressed size for layer %q is unknown", layerID) - } - blobInfo := types.BlobInfo{ - Digest: layer.UncompressedDigest, - Size: layer.UncompressedSize, - MediaType: uncompressedLayerType, - } - physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) - layerID = layer.Parent - } - - res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) - if err != nil { - return nil, errors.Wrapf(err, "error creating LayerInfosForCopy of image %q", s.image.ID) - } - return res, nil -} - -// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest, -// but using layer data which we can actually produce — physicalInfos for non-empty layers, -// and image.GzippedEmptyLayer for empty ones. -// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.) 
-func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
-	nextPhysical := 0
-	res := make([]types.BlobInfo, len(manifestInfos))
-	for i, mi := range manifestInfos {
-		if mi.EmptyLayer {
-			res[i] = types.BlobInfo{
-				Digest:    image.GzippedEmptyLayerDigest,
-				Size:      int64(len(image.GzippedEmptyLayer)),
-				MediaType: mi.MediaType,
-			}
-		} else {
-			if nextPhysical >= len(physicalInfos) {
-				return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
-			}
-			res[i] = physicalInfos[nextPhysical]
-			nextPhysical++
-		}
-	}
-	if nextPhysical != len(physicalInfos) {
-		return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
-	}
-	return res, nil
-}
-
-// GetSignatures() parses the image's signatures blob into a slice of byte slices.
-func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
-	if instanceDigest != nil {
-		return nil, ErrNoManifestLists
-	}
-	var offset int
-	sigslice := [][]byte{}
-	signature := []byte{}
-	if len(s.SignatureSizes) > 0 {
-		signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, "signatures")
-		if err != nil {
-			return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.image.ID)
-		}
-		signature = signatureBlob
-	}
-	for _, length := range s.SignatureSizes {
-		sigslice = append(sigslice, signature[offset:offset+length])
-		offset += length
-	}
-	if offset != len(signature) {
-		return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
-	}
-	return sigslice, nil
-}
-
-// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
-// it's time to Commit() the image.
-func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
-	directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "storage")
-	if err != nil {
-		return nil, errors.Wrapf(err, "error creating a temporary directory")
-	}
-	image := &storageImageDestination{
-		imageRef:       imageRef,
-		directory:      directory,
-		blobDiffIDs:    make(map[digest.Digest]digest.Digest),
-		fileSizes:      make(map[digest.Digest]int64),
-		filenames:      make(map[digest.Digest]string),
-		SignatureSizes: []int{},
-	}
-	return image, nil
-}
-
-// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (s *storageImageDestination) Reference() types.ImageReference {
-	return s.imageRef
-}
-
-// Close cleans up the temporary directory.
-func (s *storageImageDestination) Close() error {
-	return os.RemoveAll(s.directory)
-}
-
-func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
-	// We ultimately have to decompress layers to populate trees on disk
-	// and need to explicitly ask for it here, so that the layers' MIME
-	// types can be set accordingly.
-	return types.PreserveOriginal
-}
-
-func (s *storageImageDestination) computeNextBlobCacheFile() string {
-	return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (s *storageImageDestination) HasThreadSafePutBlob() bool { - return true -} - -// PutBlob writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// inputInfo.MediaType describes the blob format, if known. -// May update cache. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - // Stores a layer or data blob in our temporary directory, checking that any information - // in the blobinfo matches the incoming data. - errorBlobInfo := types.BlobInfo{ - Digest: "", - Size: -1, - } - // Set up to digest the blob and count its size while saving it to a file. - hasher := digest.Canonical.Digester() - if blobinfo.Digest.Validate() == nil { - if a := blobinfo.Digest.Algorithm(); a.Available() { - hasher = a.Digester() - } - } - diffID := digest.Canonical.Digester() - filename := s.computeNextBlobCacheFile() - file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) - if err != nil { - return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename) - } - defer file.Close() - counter := ioutils.NewWriteCounter(hasher.Hash()) - reader := io.TeeReader(io.TeeReader(stream, counter), file) - decompressed, err := archive.DecompressStream(reader) - if err != nil { - return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob") - } - // Copy the data to the file. - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - _, err = io.Copy(diffID.Hash(), decompressed) - decompressed.Close() - if err != nil { - return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename) - } - // Ensure that any information that we were given about the blob is correct. - if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() { - return errorBlobInfo, ErrBlobDigestMismatch - } - if blobinfo.Size >= 0 && blobinfo.Size != counter.Count { - return errorBlobInfo, ErrBlobSizeMismatch - } - // Record information about the blob. - s.putBlobMutex.Lock() - s.blobDiffIDs[hasher.Digest()] = diffID.Digest() - s.fileSizes[hasher.Digest()] = counter.Count - s.filenames[hasher.Digest()] = filename - s.putBlobMutex.Unlock() - blobDigest := blobinfo.Digest - if blobDigest.Validate() != nil { - blobDigest = hasher.Digest() - } - blobSize := blobinfo.Size - if blobSize < 0 { - blobSize = counter.Count - } - // This is safe because we have just computed both values ourselves. - cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) - return types.BlobInfo{ - Digest: blobDigest, - Size: blobSize, - MediaType: blobinfo.MediaType, - }, nil -} - -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. 
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
-	// lock the entire method as it executes fairly quickly
-	s.putBlobMutex.Lock()
-	defer s.putBlobMutex.Unlock()
-	if blobinfo.Digest == "" {
-		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
-	}
-	if err := blobinfo.Digest.Validate(); err != nil {
-		return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
-	}
-
-	// Check if we've already cached it in a file.
-	if size, ok := s.fileSizes[blobinfo.Digest]; ok {
-		return true, types.BlobInfo{
-			Digest:    blobinfo.Digest,
-			Size:      size,
-			MediaType: blobinfo.MediaType,
-		}, nil
-	}
-
-	// Check if we have a wasn't-compressed layer in storage that's based on that blob.
-	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
-	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
-		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
-	}
-	if len(layers) > 0 {
-		// Save this for completeness.
-		s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
-		return true, types.BlobInfo{
-			Digest:    blobinfo.Digest,
-			Size:      layers[0].UncompressedSize,
-			MediaType: blobinfo.MediaType,
-		}, nil
-	}
-
-	// Check if we have a was-compressed layer in storage that's based on that blob.
-	layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
-	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
-		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
-	}
-	if len(layers) > 0 {
-		// Record the uncompressed value so that we can use it to calculate layer IDs.
-		s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
-		return true, types.BlobInfo{
-			Digest:    blobinfo.Digest,
-			Size:      layers[0].CompressedSize,
-			MediaType: blobinfo.MediaType,
-		}, nil
-	}
-
-	// Does the blob correspond to a known DiffID which we already have available?
-	// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
-	// uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size.
- if canSubstitute || blobinfo.Size != -1 { - if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest) - } - if len(layers) > 0 { - if blobinfo.Size != -1 { - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, blobinfo, nil - } - if !canSubstitute { - return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo) - } - s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest - return true, types.BlobInfo{ - Digest: uncompressedDigest, - Size: layers[0].UncompressedSize, - MediaType: blobinfo.MediaType, - }, nil - } - } - } - - // Nope, we don't have it. - return false, types.BlobInfo{}, nil -} - -// computeID computes a recommended image ID based on information we have so far. If -// the manifest is not of a type that we recognize, we return an empty value, indicating -// that since we don't have a recommendation, a random ID should be used if one needs -// to be allocated. -func (s *storageImageDestination) computeID(m manifest.Manifest) string { - // Build the diffID list. We need the decompressed sums that we've been calculating to - // fill in the DiffIDs. It's expected (but not enforced by us) that the number of - // diffIDs corresponds to the number of non-EmptyLayer entries in the history. - var diffIDs []digest.Digest - switch m := m.(type) { - case *manifest.Schema1: - // Build a list of the diffIDs we've generated for the non-throwaway FS layers, - // in reverse of the order in which they were originally listed. - for i, compat := range m.ExtractedV1Compatibility { - if compat.ThrowAway { - continue - } - blobSum := m.FSLayers[i].BlobSum - diffID, ok := s.blobDiffIDs[blobSum] - if !ok { - logrus.Infof("error looking up diffID for layer %q", blobSum.String()) - return "" - } - diffIDs = append([]digest.Digest{diffID}, diffIDs...) - } - case *manifest.Schema2, *manifest.OCI1: - // We know the ID calculation for these formats doesn't actually use the diffIDs, - // so we don't need to populate the diffID list. - default: - return "" - } - id, err := m.ImageID(diffIDs) - if err != nil { - return "" - } - return id -} - -// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig -// information out of it for Inspect(). -func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { - if info.Digest == "" { - return nil, errors.Errorf(`no digest supplied when reading blob`) - } - if err := info.Digest.Validate(); err != nil { - return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) - } - // Assume it's a file, since we're only calling this from a place that expects to read files. - if filename, ok := s.filenames[info.Digest]; ok { - contents, err2 := ioutil.ReadFile(filename) - if err2 != nil { - return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename) - } - return contents, nil - } - // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. - return nil, errors.New("blob not found") -} - -func (s *storageImageDestination) Commit(ctx context.Context) error { - // Find the list of layer blobs. 
-	if len(s.manifest) == 0 {
-		return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
-	}
-	man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
-	if err != nil {
-		return errors.Wrapf(err, "error parsing manifest")
-	}
-	layerBlobs := man.LayerInfos()
-	// Extract or find the layers.
-	lastLayer := ""
-	for _, blob := range layerBlobs {
-		if blob.EmptyLayer {
-			continue
-		}
-
-		// Check if there's already a layer with the ID that we'd give to the result of applying
-		// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
-		diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
-		if !haveDiffID {
-			// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
-			// or to even check if we had it.
-			// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
-			// that relies on using a blob digest that has never been seen by the store had better call
-			// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
-			// so far we are going to accommodate that (if we should be doing that at all).
-			logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
-			has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
-			if err != nil {
-				return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
-			}
-			if !has {
-				return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
-			}
-			diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
-			if !haveDiffID {
-				return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
-			}
-		}
-		id := diffID.Hex()
-		if lastLayer != "" {
-			id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
-		}
-		if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
-			// There's already a layer that should have the right contents, just reuse it.
-			lastLayer = layer.ID
-			continue
-		}
-		// Check if we previously cached a file with that blob's contents. If we didn't,
-		// then we need to read the desired contents from a layer.
-		filename, ok := s.filenames[blob.Digest]
-		if !ok {
-			// Try to find the layer with contents matching that blobsum.
-			layer := ""
-			layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
-			if err2 == nil && len(layers) > 0 {
-				layer = layers[0].ID
-			} else {
-				layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
-				if err2 == nil && len(layers) > 0 {
-					layer = layers[0].ID
-				}
-			}
-			if layer == "" {
-				return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
-			}
-			// Read the layer's contents.
-			noCompression := archive.Uncompressed
-			diffOptions := &storage.DiffOptions{
-				Compression: &noCompression,
-			}
-			diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
-			if err2 != nil {
-				return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
-			}
-			// Copy the layer diff to a file. Diff() takes a lock that it holds
-			// until the ReadCloser that it returns is closed, and PutLayer() wants
-			// the same lock, so the diff can't just be directly streamed from one
-			// to the other.
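(Aside, not part of the patch: the layer-ID chaining used in Commit() above, isolated. Each ID is the canonical digest of parentID+"+"+diffID, so identical stacks of diffs reproduce identical layer chains; diffIDs here is a hypothetical slice of uncompressed-digest hex strings:)

    parent := ""
    for _, diffID := range diffIDs {
    	id := diffID
    	if parent != "" {
    		id = digest.Canonical.FromBytes([]byte(parent + "+" + diffID)).Hex()
    	}
    	parent = id // becomes the parent of the next layer
    }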
- filename = s.computeNextBlobCacheFile() - file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) - if err != nil { - diff.Close() - return errors.Wrapf(err, "error creating temporary file %q", filename) - } - // Copy the data to the file. - // TODO: This can take quite some time, and should ideally be cancellable using - // ctx.Done(). - _, err = io.Copy(file, diff) - diff.Close() - file.Close() - if err != nil { - return errors.Wrapf(err, "error storing blob to file %q", filename) - } - // Make sure that we can find this file later, should we need the layer's - // contents again. - s.filenames[blob.Digest] = filename - } - // Read the cached blob and use it as a diff. - file, err := os.Open(filename) - if err != nil { - return errors.Wrapf(err, "error opening file %q", filename) - } - defer file.Close() - // Build the new layer using the diff, regardless of where it came from. - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file) - if err != nil && errors.Cause(err) != storage.ErrDuplicateID { - return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) - } - lastLayer = layer.ID - } - - // If one of those blobs was a configuration blob, then we can try to dig out the date when the image - // was originally created, in case we're just copying it. If not, no harm done. - options := &storage.ImageOptions{} - if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil { - logrus.Debugf("setting image creation date to %s", inspect.Created) - options.CreationDate = *inspect.Created - } - // Create the image record, pointing to the most-recently added layer. - intendedID := s.imageRef.id - if intendedID == "" { - intendedID = s.computeID(man) - } - oldNames := []string{} - img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) - if err != nil { - if errors.Cause(err) != storage.ErrDuplicateID { - logrus.Debugf("error creating image: %q", err) - return errors.Wrapf(err, "error creating image %q", intendedID) - } - img, err = s.imageRef.transport.store.Image(intendedID) - if err != nil { - return errors.Wrapf(err, "error reading image %q", intendedID) - } - if img.TopLayer != lastLayer { - logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) - return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) - } - logrus.Debugf("reusing image ID %q", img.ID) - oldNames = append(oldNames, img.Names...) - } else { - logrus.Debugf("created new image ID %q", img.ID) - } - // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so - // we just need to screen out the ones that are actually layers to get the list of non-layers. 
- dataBlobs := make(map[digest.Digest]struct{}) - for blob := range s.filenames { - dataBlobs[blob] = struct{}{} - } - for _, layerBlob := range layerBlobs { - delete(dataBlobs, layerBlob.Digest) - } - for blob := range dataBlobs { - v, err := ioutil.ReadFile(s.filenames[blob]) - if err != nil { - return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) - } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) - return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) - } - } - // Set the reference's name on the image. - if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { - names := []string{} - if name != nil { - names = append(names, name.String()) - } - if len(oldNames) > 0 { - names = append(names, oldNames...) - } - if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) - return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) - } - logrus.Debugf("set names of image %q to %v", img.ID, names) - } - // Save the manifest. Allow looking it up by digest by using the key convention defined by the Store. - // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, - // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers. - manifestDigest, err := manifest.Digest(s.manifest) - if err != nil { - return errors.Wrapf(err, "error computing manifest digest") - } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest, manifest.Digest); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) - return err - } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest, manifest.Digest); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) - return err - } - // Save the signatures, if we have any. - if len(s.signatures) > 0 { - if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return err - } - } - // Save our metadata. 
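The digest-specific key used above comes from manifestBigDataKey(), which is not part of this hunk. For context, the Store convention it implements (assuming the helper matches the rest of this file; the vendored copy is authoritative) is simply the fixed legacy key plus the digest:

// Assumed imports: storage "github.com/containers/storage",
// digest "github.com/opencontainers/go-digest".
//
// manifestBigDataKey returns the image big-data key for a manifest with the
// given digest: the legacy fixed key, a "-" separator, then the digest string.
func manifestBigDataKey(manifestDigest digest.Digest) string {
	return storage.ImageDigestBigDataKey + "-" + manifestDigest.String()
}

That is what lets older readers keep using storage.ImageDigestBigDataKey while digest-aware readers look up a specific instance.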
- metadata, err := json.Marshal(s) - if err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) - return err - } - if len(metadata) != 0 { - if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) - return err - } - logrus.Debugf("saved image metadata %q", string(metadata)) - } - return nil -} - -var manifestMIMETypes = []string{ - imgspecv1.MediaTypeImageManifest, - manifest.DockerV2Schema2MediaType, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, -} - -func (s *storageImageDestination) SupportedManifestMIMETypes() []string { - return manifestMIMETypes -} - -// PutManifest writes the manifest to the destination. -func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error { - if s.imageRef.named != nil { - if digested, ok := s.imageRef.named.(reference.Digested); ok { - matches, err := manifest.MatchesDigest(manifestBlob, digested.Digest()) - if err != nil { - return err - } - if !matches { - return fmt.Errorf("Manifest does not match expected digest %s", digested.Digest()) - } - } - } - - s.manifest = make([]byte, len(manifestBlob)) - copy(s.manifest, manifestBlob) - return nil -} - -// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was -// previously supplied to PutSignatures(). -func (s *storageImageDestination) SupportsSignatures(ctx context.Context) error { - return nil -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be -// uploaded to the image destination, true otherwise. -func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (s *storageImageDestination) MustMatchRuntimeOS() bool { - return true -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { - return true // Yes, we want the unmodified manifest -} - -// PutSignatures records the image's signatures for committing as a single data blob. -func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { - sizes := []int{} - sigblob := []byte{} - for _, sig := range signatures { - sizes = append(sizes, len(sig)) - newblob := make([]byte, len(sigblob)+len(sig)) - copy(newblob, sigblob) - copy(newblob[len(sigblob):], sig) - sigblob = newblob - } - s.signatures = sigblob - s.SignatureSizes = sizes - return nil -} - -// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the -// signatures, and the uncompressed sizes of all of the image's layers. -func (s *storageImageSource) getSize() (int64, error) { - var sum int64 - // Size up the data blobs. 
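PutSignatures above flattens all signatures into one blob and remembers their lengths in SignatureSizes; a reader can then slice them back apart. A sketch of that inverse (the helper name is made up):

package main

import (
	"bytes"
	"fmt"
)

// splitSignatures undoes the concatenation done by PutSignatures, using the
// recorded per-signature sizes.
func splitSignatures(blob []byte, sizes []int) ([][]byte, error) {
	sigs := make([][]byte, 0, len(sizes))
	offset := 0
	for _, size := range sizes {
		if offset+size > len(blob) {
			return nil, fmt.Errorf("signature blob too short: need %d bytes at offset %d", size, offset)
		}
		sigs = append(sigs, blob[offset:offset+size])
		offset += size
	}
	return sigs, nil
}

func main() {
	blob := bytes.Join([][]byte{[]byte("sig-one"), []byte("sig-two")}, nil)
	sigs, err := splitSignatures(blob, []int{7, 7})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s %s\n", sigs[0], sigs[1])
}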
- dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID) - if err != nil { - return -1, errors.Wrapf(err, "error reading image %q", s.image.ID) - } - for _, dataName := range dataNames { - bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName) - if err != nil { - return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.image.ID) - } - sum += bigSize - } - // Add the signature sizes. - for _, sigSize := range s.SignatureSizes { - sum += int64(sigSize) - } - // Walk the layer list. - layerID := s.image.TopLayer - for layerID != "" { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return -1, err - } - if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { - return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) - } - sum += layer.UncompressedSize - if layer.Parent == "" { - break - } - layerID = layer.Parent - } - return sum, nil -} - -// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the -// signatures, and the uncompressed sizes of all of the image's layers. -func (s *storageImageSource) Size() (int64, error) { - return s.getSize() -} - -// Size() returns the previously-computed size of the image, with no error. -func (s *storageImageCloser) Size() (int64, error) { - return s.size, nil -} - -// newImage creates an image that also knows its size -func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) { - src, err := newImageSource(s) - if err != nil { - return nil, err - } - img, err := image.FromSource(ctx, sys, src) - if err != nil { - return nil, err - } - size, err := src.getSize() - if err != nil { - return nil, err - } - return &storageImageCloser{ImageCloser: img, size: size}, nil -} diff --git a/vendor/github.com/containers/image/v4/storage/storage_reference.go b/vendor/github.com/containers/image/v4/storage/storage_reference.go deleted file mode 100644 index 7ad20817b..000000000 --- a/vendor/github.com/containers/image/v4/storage/storage_reference.go +++ /dev/null @@ -1,225 +0,0 @@ -// +build !containers_image_storage_stub - -package storage - -import ( - "context" - "strings" - - "github.com/containers/image/v4/docker/reference" - "github.com/containers/image/v4/types" - "github.com/containers/storage" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte -// value hex-encoded into a 64-character string, and a reference to a Store -// where an image is, or would be, kept. -// Either "named" or "id" must be set. -type storageReference struct { - transport storageTransport - named reference.Named // may include a tag and/or a digest - id string -} - -func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) { - if named == nil && id == "" { - return nil, ErrInvalidReference - } - // We take a copy of the transport, which contains a pointer to the - // store that it used for resolving this reference, so that the - // transport that we'll return from Transport() won't be affected by - // further calls to the original transport's SetStore() method. 
- return &storageReference{ - transport: transport, - named: named, - id: id, - }, nil -} - -// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref -func imageMatchesRepo(image *storage.Image, ref reference.Named) bool { - repo := ref.Name() - for _, name := range image.Names { - if named, err := reference.ParseNormalizedNamed(name); err == nil { - if named.Name() == repo { - return true - } - } - } - return false -} - -// Resolve the reference's name to an image ID in the store, if there's already -// one present with the same name or ID, and return the image. -func (s *storageReference) resolveImage() (*storage.Image, error) { - var loadedImage *storage.Image - if s.id == "" && s.named != nil { - // Look for an image that has the expanded reference name as an explicit Name value. - image, err := s.transport.store.Image(s.named.String()) - if image != nil && err == nil { - loadedImage = image - s.id = image.ID - } - } - if s.id == "" && s.named != nil { - if digested, ok := s.named.(reference.Digested); ok { - // Look for an image with the specified digest that has the same name, - // though possibly with a different tag or digest, as a Name value, so - // that the canonical reference can be implicitly resolved to the image. - images, err := s.transport.store.ImagesByDigest(digested.Digest()) - if err == nil && len(images) > 0 { - for _, image := range images { - if imageMatchesRepo(image, s.named) { - loadedImage = image - s.id = image.ID - break - } - } - } - } - } - if s.id == "" { - logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) - return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport()) - } - if loadedImage == nil { - img, err := s.transport.store.Image(s.id) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", s.id) - } - loadedImage = img - } - if s.named != nil { - if !imageMatchesRepo(loadedImage, s.named) { - logrus.Errorf("no image matching reference %q found", s.StringWithinTransport()) - return nil, ErrNoSuchImage - } - } - // Default to having the image digest that we hand back match the most recently - // added manifest... - if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok { - loadedImage.Digest = digest - } - // ... unless the named reference says otherwise, and it matches one of the digests - // in the image. For those cases, set the Digest field to that value, for the - // sake of older consumers that don't know there's a whole list in there now. - if s.named != nil { - if digested, ok := s.named.(reference.Digested); ok { - for _, digest := range loadedImage.Digests { - if digest == digested.Digest() { - loadedImage.Digest = digest - break - } - } - } - } - return loadedImage, nil -} - -// Return a Transport object that defaults to using the same store that we used -// to build this reference object. -func (s storageReference) Transport() types.ImageTransport { - return &storageTransport{ - store: s.transport.store, - defaultUIDMap: s.transport.defaultUIDMap, - defaultGIDMap: s.transport.defaultGIDMap, - } -} - -// Return a name with a tag or digest, if we have either, else return it bare. -func (s storageReference) DockerReference() reference.Named { - return s.named -} - -// Return a name with a tag, prefixed with the graph root and driver name, to -// disambiguate between images which may be present in multiple stores and -// share only their names. 
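imageMatchesRepo above compares only the repository portion of each stored name, after normalization. The example below (image names are arbitrary) shows why a short name and a fully-qualified docker.io name still match:

package main

import (
	"fmt"

	"github.com/containers/image/v4/docker/reference"
)

func main() {
	for _, name := range []string{"busybox:latest", "docker.io/library/busybox:1.31"} {
		named, err := reference.ParseNormalizedNamed(name)
		if err != nil {
			panic(err)
		}
		// Both print "docker.io/library/busybox": the tag is ignored and the
		// short name expands to the canonical repository.
		fmt.Println(named.Name())
	}
}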
-func (s storageReference) StringWithinTransport() string { - optionsList := "" - options := s.transport.store.GraphOptions() - if len(options) > 0 { - optionsList = ":" + strings.Join(options, ",") - } - res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" - if s.named != nil { - res = res + s.named.String() - } - if s.id != "" { - res = res + "@" + s.id - } - return res -} - -func (s storageReference) PolicyConfigurationIdentity() string { - res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" - if s.named != nil { - res = res + s.named.String() - } - if s.id != "" { - res = res + "@" + s.id - } - return res -} - -// Also accept policy that's tied to the combination of the graph root and -// driver name, to apply to all images stored in the Store, and to just the -// graph root, in case we're using multiple drivers in the same directory for -// some reason. -func (s storageReference) PolicyConfigurationNamespaces() []string { - storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" - driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" - namespaces := []string{} - if s.named != nil { - if s.id != "" { - // The reference without the ID is also a valid namespace. - namespaces = append(namespaces, storeSpec+s.named.String()) - } - tagged, isTagged := s.named.(reference.Tagged) - _, isDigested := s.named.(reference.Digested) - if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace. - namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag()) - } - components := strings.Split(s.named.Name(), "/") - for len(components) > 0 { - namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) - components = components[:len(components)-1] - } - } - namespaces = append(namespaces, storeSpec) - namespaces = append(namespaces, driverlessStoreSpec) - return namespaces -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
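Worked example for the namespace expansion above (store paths are illustrative): for a reference parsed from [overlay@/var/lib/containers/storage]docker.io/library/busybox:latest@<id>, PolicyConfigurationNamespaces() yields, in order, [overlay@/var/lib/containers/storage]docker.io/library/busybox:latest, then [overlay@/var/lib/containers/storage]docker.io/library/busybox, [overlay@/var/lib/containers/storage]docker.io/library, [overlay@/var/lib/containers/storage]docker.io, then the bare store spec [overlay@/var/lib/containers/storage], and finally the driverless [/var/lib/containers/storage]. Policy lookup processes these in order and stops at the first match.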
-func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- return newImage(ctx, sys, s)
-}
-
-func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- img, err := s.resolveImage()
- if err != nil {
- return err
- }
- layers, err := s.transport.store.DeleteImage(img.ID, true)
- if err == nil {
- logrus.Debugf("deleted image %q", img.ID)
- for _, layer := range layers {
- logrus.Debugf("deleted layer %q", layer)
- }
- }
- return err
-}
-
-func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
- return newImageSource(s)
-}
-
-func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
- return newImageDestination(s)
-}
diff --git a/vendor/github.com/containers/image/v4/storage/storage_transport.go b/vendor/github.com/containers/image/v4/storage/storage_transport.go
deleted file mode 100644
index 48b909c03..000000000
--- a/vendor/github.com/containers/image/v4/storage/storage_transport.go
+++ /dev/null
@@ -1,366 +0,0 @@
-// +build !containers_image_storage_stub
-
-package storage
-
-import (
- "fmt"
- "path/filepath"
- "strings"
-
- "github.com/pkg/errors"
-
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
- "github.com/containers/storage"
- "github.com/containers/storage/pkg/idtools"
- digest "github.com/opencontainers/go-digest"
- "github.com/sirupsen/logrus"
-)
-
-const (
- minimumTruncatedIDLength = 3
-)
-
-func init() {
- transports.Register(Transport)
-}
-
-var (
- // Transport is an ImageTransport that uses either a default
- // storage.Store or one that it's explicitly told to use.
- Transport StoreTransport = &storageTransport{}
- // ErrInvalidReference is returned when ParseReference() is passed an
- // empty reference.
- ErrInvalidReference = errors.New("invalid reference")
- // ErrPathNotAbsolute is returned when a graph root is not an absolute
- // path name.
- ErrPathNotAbsolute = errors.New("path name is not absolute")
-)
-
-// StoreTransport is an ImageTransport that uses a storage.Store to parse
-// references, either its own default or one that it's told to use.
-type StoreTransport interface {
- types.ImageTransport
- // SetStore sets the default store for this transport.
- SetStore(storage.Store)
- // GetImage retrieves the image from the transport's store that's named
- // by the reference.
- GetImage(types.ImageReference) (*storage.Image, error)
- // GetStoreImage retrieves the image from a specified store that's named
- // by the reference.
- GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
- // ParseStoreReference parses a reference, overriding any store
- // specification that it may contain.
- ParseStoreReference(store storage.Store, reference string) (*storageReference, error)
- // SetDefaultUIDMap sets the default UID map to use when opening stores.
- SetDefaultUIDMap(idmap []idtools.IDMap)
- // SetDefaultGIDMap sets the default GID map to use when opening stores.
- SetDefaultGIDMap(idmap []idtools.IDMap)
- // DefaultUIDMap returns the default UID map used when opening stores.
- DefaultUIDMap() []idtools.IDMap
- // DefaultGIDMap returns the default GID map used when opening stores.
- DefaultGIDMap() []idtools.IDMap -} - -type storageTransport struct { - store storage.Store - defaultUIDMap []idtools.IDMap - defaultGIDMap []idtools.IDMap -} - -func (s *storageTransport) Name() string { - // Still haven't really settled on a name. - return "containers-storage" -} - -// SetStore sets the Store object which the Transport will use for parsing -// references when information about a Store is not directly specified as part -// of the reference. If one is not set, the library will attempt to initialize -// one with default settings when a reference needs to be parsed. Calling -// SetStore does not affect previously parsed references. -func (s *storageTransport) SetStore(store storage.Store) { - s.store = store -} - -// SetDefaultUIDMap sets the default UID map to use when opening stores. -func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { - s.defaultUIDMap = idmap -} - -// SetDefaultGIDMap sets the default GID map to use when opening stores. -func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) { - s.defaultGIDMap = idmap -} - -// DefaultUIDMap returns the default UID map used when opening stores. -func (s *storageTransport) DefaultUIDMap() []idtools.IDMap { - return s.defaultUIDMap -} - -// DefaultGIDMap returns the default GID map used when opening stores. -func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { - return s.defaultGIDMap -} - -// ParseStoreReference takes a name or an ID, tries to figure out which it is -// relative to the given store, and returns it in a reference object. -func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { - if ref == "" { - return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref) - } - if ref[0] == '[' { - // Ignore the store specifier. - closeIndex := strings.IndexRune(ref, ']') - if closeIndex < 1 { - return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) - } - ref = ref[closeIndex+1:] - } - - // The reference may end with an image ID. Image IDs and digests use the same "@" separator; - // here we only peel away an image ID, and leave digests alone. - split := strings.LastIndex(ref, "@") - id := "" - if split != -1 { - possibleID := ref[split+1:] - if possibleID == "" { - return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref) - } - // If it looks like a digest, leave it alone for now. - if _, err := digest.Parse(possibleID); err != nil { - // Otherwise… - if idSum, err := digest.Parse("sha256:" + possibleID); err == nil && idSum.Validate() == nil { - id = possibleID // … it is a full ID - } else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) { - // … it is a truncated version of the ID of an image that's present in local storage, - // so we might as well use the expanded value. - id = img.ID - } else { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID) - } - // We have recognized an image ID; peel it off. - ref = ref[:split] - } - } - - // If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's - // at least of what we guess is a reasonable minimum length, because we don't want a really short value - // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. 
- if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") { - if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) { - // It's a truncated version of the ID of an image that's present in local storage; - // we need to expand it. - id = img.ID - ref = "" - } - } - - var named reference.Named - // Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been - // completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest.. - if ref != "" { - var err error - named, err = reference.ParseNormalizedNamed(ref) - if err != nil { - return nil, errors.Wrapf(err, "error parsing named reference %q", ref) - } - named = reference.TagNameOnly(named) - } - - result, err := newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id) - if err != nil { - return nil, err - } - logrus.Debugf("parsed reference into %q", result.StringWithinTransport()) - return result, nil -} - -func (s *storageTransport) GetStore() (storage.Store, error) { - // Return the transport's previously-set store. If we don't have one - // of those, initialize one now. - if s.store == nil { - options, err := storage.DefaultStoreOptionsAutoDetectUID() - if err != nil { - return nil, err - } - options.UIDMap = s.defaultUIDMap - options.GIDMap = s.defaultGIDMap - store, err := storage.GetStore(options) - if err != nil { - return nil, err - } - s.store = store - } - return s.store, nil -} - -// ParseReference takes a name and a tag or digest and/or ID -// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"), -// possibly prefixed with a store specifier in the form "[_graphroot_]" or -// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or -// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", -// tries to figure out which it is, and returns it in a reference object. -// If _id_ is the ID of an image that's present in local storage, it can be truncated, and -// even be specified as if it were a _name_, value. -func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { - var store storage.Store - // Check if there's a store location prefix. If there is, then it - // needs to match a store that was previously initialized using - // storage.GetStore(), or be enough to let the storage library fill out - // the rest using knowledge that it has from elsewhere. - if reference[0] == '[' { - closeIndex := strings.IndexRune(reference, ']') - if closeIndex < 1 { - return nil, ErrInvalidReference - } - storeSpec := reference[1:closeIndex] - reference = reference[closeIndex+1:] - // Peel off a "driver@" from the start. - driverInfo := "" - driverSplit := strings.SplitN(storeSpec, "@", 2) - if len(driverSplit) != 2 { - if storeSpec == "" { - return nil, ErrInvalidReference - } - } else { - driverInfo = driverSplit[0] - if driverInfo == "" { - return nil, ErrInvalidReference - } - storeSpec = driverSplit[1] - if storeSpec == "" { - return nil, ErrInvalidReference - } - } - // Peel off a ":options" from the end. - var options []string - optionsSplit := strings.SplitN(storeSpec, ":", 2) - if len(optionsSplit) == 2 { - options = strings.Split(optionsSplit[1], ",") - storeSpec = optionsSplit[0] - } - // Peel off a "+runroot" from the new end. 
- runRootInfo := ""
- runRootSplit := strings.SplitN(storeSpec, "+", 2)
- if len(runRootSplit) == 2 {
- runRootInfo = runRootSplit[1]
- storeSpec = runRootSplit[0]
- }
- // The rest is our graph root.
- rootInfo := storeSpec
- // Check that any paths are absolute paths.
- if rootInfo != "" && !filepath.IsAbs(rootInfo) {
- return nil, ErrPathNotAbsolute
- }
- if runRootInfo != "" && !filepath.IsAbs(runRootInfo) {
- return nil, ErrPathNotAbsolute
- }
- store2, err := storage.GetStore(storage.StoreOptions{
- GraphDriverName: driverInfo,
- GraphRoot: rootInfo,
- RunRoot: runRootInfo,
- GraphDriverOptions: options,
- UIDMap: s.defaultUIDMap,
- GIDMap: s.defaultGIDMap,
- })
- if err != nil {
- return nil, err
- }
- store = store2
- } else {
- // We didn't have a store spec, so use the default.
- store2, err := s.GetStore()
- if err != nil {
- return nil, err
- }
- store = store2
- }
- return s.ParseStoreReference(store, reference)
-}
-
-func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
- dref := ref.DockerReference()
- if dref != nil {
- if img, err := store.Image(dref.String()); err == nil {
- return img, nil
- }
- }
- if sref, ok := ref.(*storageReference); ok {
- tmpRef := *sref
- if img, err := tmpRef.resolveImage(); err == nil {
- return img, nil
- }
- }
- return nil, storage.ErrImageUnknown
-}
-
-func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
- store, err := s.GetStore()
- if err != nil {
- return nil, err
- }
- return s.GetStoreImage(store, ref)
-}
-
-func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
- // Check that there's a store location prefix. Values we're passed are
- // expected to come from PolicyConfigurationIdentity or
- // PolicyConfigurationNamespaces, so if there's no store location,
- // something's wrong.
- if scope[0] != '[' {
- return ErrInvalidReference
- }
- // Parse the store location prefix.
- closeIndex := strings.IndexRune(scope, ']')
- if closeIndex < 1 {
- return ErrInvalidReference
- }
- storeSpec := scope[1:closeIndex]
- scope = scope[closeIndex+1:]
- storeInfo := strings.SplitN(storeSpec, "@", 2)
- if len(storeInfo) == 1 && storeInfo[0] != "" {
- // One component: the graph root.
- if !filepath.IsAbs(storeInfo[0]) {
- return ErrPathNotAbsolute
- }
- } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
- // Two components: the driver type and the graph root.
- if !filepath.IsAbs(storeInfo[1]) {
- return ErrPathNotAbsolute
- }
- } else {
- // Anything else: scope specified in a form we don't
- // recognize.
- return ErrInvalidReference
- }
- // That might be all of it, and that's okay.
- if scope == "" {
- return nil
- }
-
- fields := strings.SplitN(scope, "@", 3)
- switch len(fields) {
- case 1: // name only
- case 2: // name:tag@ID or name[:tag]@digest
- if _, idErr := digest.Parse("sha256:" + fields[1]); idErr != nil {
- if _, digestErr := digest.Parse(fields[1]); digestErr != nil {
- return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error())
- }
- }
- case 3: // name[:tag]@digest@ID
- if _, err := digest.Parse(fields[1]); err != nil {
- return err
- }
- if _, err := digest.Parse("sha256:" + fields[2]); err != nil {
- return err
- }
- default: // Coverage: This should never happen
- return errors.New("Internal error: unexpected number of fields from strings.SplitN")
- }
- // As for field[0], if it is non-empty at all:
- // FIXME?
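To make the peeling order above concrete (all paths invented): given [overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev]busybox:latest@0123abc, ParseReference first strips the bracketed store specifier, splits off the driver ("overlay") at the "@", then the options list ("overlay.mountopt=nodev") at the ":", then the run root ("/run/containers/storage") at the "+", leaving the graph root ("/var/lib/containers/storage"); the remainder "busybox:latest@0123abc" then goes through ParseStoreReference, which peels the truncated ID and normalizes the name.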
We could be verifying the various character set and length restrictions
-// from docker/distribution/reference.regexp.go, but other than that there
-// are few semantically invalid strings.
- return nil
-}
diff --git a/vendor/github.com/containers/image/v4/tarball/doc.go b/vendor/github.com/containers/image/v4/tarball/doc.go
deleted file mode 100644
index ebbe156bd..000000000
--- a/vendor/github.com/containers/image/v4/tarball/doc.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Package tarball provides a way to generate images using one or more layer
-// tarballs and an optional template configuration.
-//
-// An example:
-// package main
-//
-// import (
-// "context"
-//
-// cp "github.com/containers/image/v4/copy"
-// "github.com/containers/image/v4/signature"
-// "github.com/containers/image/v4/tarball"
-// "github.com/containers/image/v4/transports/alltransports"
-//
-// imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-// )
-//
-// func imageFromTarball() {
-// src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
-// // - or -
-// // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
-// if err != nil {
-// panic(err)
-// }
-// updater, ok := src.(tarball.ConfigUpdater)
-// if !ok {
-// panic("unexpected: a tarball reference should implement tarball.ConfigUpdater")
-// }
-// config := imgspecv1.Image{
-// Config: imgspecv1.ImageConfig{
-// Cmd: []string{"/bin/bash"},
-// },
-// }
-// annotations := make(map[string]string)
-// annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache"
-// err = updater.ConfigUpdate(config, annotations)
-// if err != nil {
-// panic(err)
-// }
-// dest, err := alltransports.ParseImageName("docker-daemon:mock:latest")
-// if err != nil {
-// panic(err)
-// }
-// policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
-// policyCtx, err := signature.NewPolicyContext(policy)
-// if err != nil {
-// panic(err)
-// }
-// defer policyCtx.Destroy()
-// _, err = cp.Image(context.TODO(), policyCtx, dest, src, nil)
-// if err != nil {
-// panic(err)
-// }
-// }
-package tarball
diff --git a/vendor/github.com/containers/image/v4/tarball/tarball_reference.go b/vendor/github.com/containers/image/v4/tarball/tarball_reference.go
deleted file mode 100644
index d33c20de1..000000000
--- a/vendor/github.com/containers/image/v4/tarball/tarball_reference.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package tarball
-
-import (
- "context"
- "fmt"
- "os"
- "strings"
-
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/types"
-
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// ConfigUpdater is an interface that ImageReferences for "tarball" images also
-// implement. It can be used to set values for a configuration, and to set
-// image annotations which will be present in the images returned by the
-// reference's NewImage() or NewImageSource() methods.
-type ConfigUpdater interface {
- ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error
-}
-
-type tarballReference struct {
- transport types.ImageTransport
- config imgspecv1.Image
- annotations map[string]string
- filenames []string
- stdin []byte
-}
-
-// ConfigUpdate updates the image's default configuration and adds annotations
-// which will be visible in source images created using this reference.
-func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error { - r.config = config - if r.annotations == nil { - r.annotations = make(map[string]string) - } - for k, v := range annotations { - r.annotations[k] = v - } - return nil -} - -func (r *tarballReference) Transport() types.ImageTransport { - return r.transport -} - -func (r *tarballReference) StringWithinTransport() string { - return strings.Join(r.filenames, ":") -} - -func (r *tarballReference) DockerReference() reference.Named { - return nil -} - -func (r *tarballReference) PolicyConfigurationIdentity() string { - return "" -} - -func (r *tarballReference) PolicyConfigurationNamespaces() []string { - return nil -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := r.NewImageSource(ctx, sys) - if err != nil { - return nil, err - } - img, err := image.FromSource(ctx, sys, src) - if err != nil { - src.Close() - return nil, err - } - return img, nil -} - -func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - for _, filename := range r.filenames { - if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error removing %q: %v", filename, err) - } - } - return nil -} - -func (r *tarballReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`) -} diff --git a/vendor/github.com/containers/image/v4/tarball/tarball_src.go b/vendor/github.com/containers/image/v4/tarball/tarball_src.go deleted file mode 100644 index ead1a50bd..000000000 --- a/vendor/github.com/containers/image/v4/tarball/tarball_src.go +++ /dev/null @@ -1,268 +0,0 @@ -package tarball - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strings" - "time" - - "github.com/containers/image/v4/types" - "github.com/klauspost/pgzip" - digest "github.com/opencontainers/go-digest" - imgspecs "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -type tarballImageSource struct { - reference tarballReference - filenames []string - diffIDs []digest.Digest - diffSizes []int64 - blobIDs []digest.Digest - blobSizes []int64 - blobTypes []string - config []byte - configID digest.Digest - configSize int64 - manifest []byte -} - -func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - // Gather up the digests, sizes, and date information for all of the files. 
- filenames := []string{} - diffIDs := []digest.Digest{} - diffSizes := []int64{} - blobIDs := []digest.Digest{} - blobSizes := []int64{} - blobTimes := []time.Time{} - blobTypes := []string{} - for _, filename := range r.filenames { - var file *os.File - var err error - var blobSize int64 - var blobTime time.Time - var reader io.Reader - if filename == "-" { - blobSize = int64(len(r.stdin)) - blobTime = time.Now() - reader = bytes.NewReader(r.stdin) - } else { - file, err = os.Open(filename) - if err != nil { - return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) - } - defer file.Close() - reader = file - fileinfo, err := file.Stat() - if err != nil { - return nil, fmt.Errorf("error reading size of %q: %v", filename, err) - } - blobSize = fileinfo.Size() - blobTime = fileinfo.ModTime() - } - - // Default to assuming the layer is compressed. - layerType := imgspecv1.MediaTypeImageLayerGzip - - // Set up to digest the file as it is. - blobIDdigester := digest.Canonical.Digester() - reader = io.TeeReader(reader, blobIDdigester.Hash()) - - // Set up to digest the file after we maybe decompress it. - diffIDdigester := digest.Canonical.Digester() - uncompressed, err := pgzip.NewReader(reader) - if err == nil { - // It is compressed, so the diffID is the digest of the uncompressed version - reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) - } else { - // It is not compressed, so the diffID and the blobID are going to be the same - diffIDdigester = blobIDdigester - layerType = imgspecv1.MediaTypeImageLayer - uncompressed = nil - } - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - n, err := io.Copy(ioutil.Discard, reader) - if err != nil { - return nil, fmt.Errorf("error reading %q: %v", filename, err) - } - if uncompressed != nil { - uncompressed.Close() - } - - // Grab our uncompressed and possibly-compressed digests and sizes. - filenames = append(filenames, filename) - diffIDs = append(diffIDs, diffIDdigester.Digest()) - diffSizes = append(diffSizes, n) - blobIDs = append(blobIDs, blobIDdigester.Digest()) - blobSizes = append(blobSizes, blobSize) - blobTimes = append(blobTimes, blobTime) - blobTypes = append(blobTypes, layerType) - } - - // Build the rootfs and history for the configuration blob. - rootfs := imgspecv1.RootFS{ - Type: "layers", - DiffIDs: diffIDs, - } - created := time.Time{} - history := []imgspecv1.History{} - // Pick up the layer comment from the configuration's history list, if one is set. - comment := "imported from tarball" - if len(r.config.History) > 0 && r.config.History[0].Comment != "" { - comment = r.config.History[0].Comment - } - for i := range diffIDs { - createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) - history = append(history, imgspecv1.History{ - Created: &blobTimes[i], - CreatedBy: createdBy, - Comment: comment, - }) - // Use the mtime of the most recently modified file as the image's creation time. - if created.Before(blobTimes[i]) { - created = blobTimes[i] - } - } - - // Pick up other defaults from the config in the reference. - config := r.config - if config.Created == nil { - config.Created = &created - } - if config.Architecture == "" { - config.Architecture = runtime.GOARCH - } - if config.OS == "" { - config.OS = runtime.GOOS - } - config.RootFS = rootfs - config.History = history - - // Encode and digest the image configuration blob. 
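The detection trick above, try a gzip reader and fall back to treating the stream as uncompressed, can be demonstrated standalone. This sketch simplifies to byte slices and swaps the pgzip dependency for the standard library's compress/gzip:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"

	digest "github.com/opencontainers/go-digest"
)

func digests(blob []byte) (blobID, diffID digest.Digest, err error) {
	blobID = digest.Canonical.FromBytes(blob)
	if zr, zerr := gzip.NewReader(bytes.NewReader(blob)); zerr == nil {
		// Compressed: the diffID is the digest of the decompressed stream.
		uncompressed, rerr := ioutil.ReadAll(zr)
		if rerr != nil {
			return "", "", rerr
		}
		return blobID, digest.Canonical.FromBytes(uncompressed), nil
	}
	// Not gzip: the diffID and the blobID are the same.
	return blobID, blobID, nil
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	io.WriteString(zw, "layer contents")
	zw.Close()
	b, d, _ := digests(buf.Bytes())
	fmt.Println(b != d) // true: a compressed blob has a distinct diffID
}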
- configBytes, err := json.Marshal(&config) - if err != nil { - return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) - } - configID := digest.Canonical.FromBytes(configBytes) - configSize := int64(len(configBytes)) - - // Populate a manifest with the configuration blob and the file as the single layer. - layerDescriptors := []imgspecv1.Descriptor{} - for i := range blobIDs { - layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ - Digest: blobIDs[i], - Size: blobSizes[i], - MediaType: blobTypes[i], - }) - } - annotations := make(map[string]string) - for k, v := range r.annotations { - annotations[k] = v - } - manifest := imgspecv1.Manifest{ - Versioned: imgspecs.Versioned{ - SchemaVersion: 2, - }, - Config: imgspecv1.Descriptor{ - Digest: configID, - Size: configSize, - MediaType: imgspecv1.MediaTypeImageConfig, - }, - Layers: layerDescriptors, - Annotations: annotations, - } - - // Encode the manifest. - manifestBytes, err := json.Marshal(&manifest) - if err != nil { - return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) - } - - // Return the image. - src := &tarballImageSource{ - reference: *r, - filenames: filenames, - diffIDs: diffIDs, - diffSizes: diffSizes, - blobIDs: blobIDs, - blobSizes: blobSizes, - blobTypes: blobTypes, - config: configBytes, - configID: configID, - configSize: configSize, - manifest: manifestBytes, - } - - return src, nil -} - -func (is *tarballImageSource) Close() error { - return nil -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (is *tarballImageSource) HasThreadSafeGetBlob() bool { - return false -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - // We should only be asked about things in the manifest. Maybe the configuration blob. - if blobinfo.Digest == is.configID { - return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil - } - // Maybe one of the layer blobs. - for i := range is.blobIDs { - if blobinfo.Digest == is.blobIDs[i] { - // We want to read that layer: open the file or memory block and hand it back. - if is.filenames[i] == "-" { - return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil - } - reader, err := os.Open(is.filenames[i]) - if err != nil { - return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) - } - return reader, is.blobSizes[i], nil - } - } - return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
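The OCI manifest assembled above can be reproduced in miniature; this sketch (the config blob is a stand-in, so the digest values are fabricated) shows the shape that GetManifest later returns, and that the manifest digest is just the digest of the serialized bytes:

package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
	imgspecs "github.com/opencontainers/image-spec/specs-go"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	config := []byte(`{}`) // stand-in for the generated configuration blob
	m := imgspecv1.Manifest{
		Versioned: imgspecs.Versioned{SchemaVersion: 2},
		Config: imgspecv1.Descriptor{
			Digest:    digest.Canonical.FromBytes(config),
			Size:      int64(len(config)),
			MediaType: imgspecv1.MediaTypeImageConfig,
		},
	}
	encoded, err := json.Marshal(&m)
	if err != nil {
		panic(err)
	}
	fmt.Println(digest.Canonical.FromBytes(encoded))
}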
-func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) - } - return is.manifest, imgspecv1.MediaTypeImageManifest, nil -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) - } - return nil, nil -} - -func (is *tarballImageSource) Reference() types.ImageReference { - return &is.reference -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (*tarballImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/v4/tarball/tarball_transport.go b/vendor/github.com/containers/image/v4/tarball/tarball_transport.go deleted file mode 100644 index 84874cfbf..000000000 --- a/vendor/github.com/containers/image/v4/tarball/tarball_transport.go +++ /dev/null @@ -1,66 +0,0 @@ -package tarball - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "strings" - - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" -) - -const ( - transportName = "tarball" - separator = ":" -) - -var ( - // Transport implements the types.ImageTransport interface for "tarball:" images, - // which are makeshift images constructed using one or more possibly-compressed tar - // archives. - Transport = &tarballTransport{} -) - -type tarballTransport struct { -} - -func (t *tarballTransport) Name() string { - return transportName -} - -func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { - var stdin []byte - var err error - filenames := strings.Split(reference, separator) - for _, filename := range filenames { - if filename == "-" { - stdin, err = ioutil.ReadAll(os.Stdin) - if err != nil { - return nil, fmt.Errorf("error buffering stdin: %v", err) - } - continue - } - f, err := os.Open(filename) - if err != nil { - return nil, fmt.Errorf("error opening %q: %v", filename, err) - } - f.Close() - } - ref := &tarballReference{ - transport: t, - filenames: filenames, - stdin: stdin, - } - return ref, nil -} - -func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { - // See the explanation in daemonReference.PolicyConfigurationIdentity. 
- return errors.New(`tarball: does not support any scopes except the default "" one`) -} - -func init() { - transports.Register(Transport) -} diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v4/transports/alltransports/alltransports.go deleted file mode 100644 index ae68fb8e6..000000000 --- a/vendor/github.com/containers/image/v4/transports/alltransports/alltransports.go +++ /dev/null @@ -1,46 +0,0 @@ -package alltransports - -import ( - "strings" - - // register all known transports - // NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating - // a transport. - _ "github.com/containers/image/v4/directory" - _ "github.com/containers/image/v4/docker" - _ "github.com/containers/image/v4/docker/archive" - _ "github.com/containers/image/v4/oci/archive" - _ "github.com/containers/image/v4/oci/layout" - _ "github.com/containers/image/v4/openshift" - _ "github.com/containers/image/v4/tarball" - // The ostree transport is registered by ostree*.go - // The storage transport is registered by storage*.go - "github.com/containers/image/v4/transports" - "github.com/containers/image/v4/types" - "github.com/pkg/errors" -) - -// ParseImageName converts a URL-like image name to a types.ImageReference. -func ParseImageName(imgName string) (types.ImageReference, error) { - // Keep this in sync with TransportFromImageName! - parts := strings.SplitN(imgName, ":", 2) - if len(parts) != 2 { - return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) - } - transport := transports.Get(parts[0]) - if transport == nil { - return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0]) - } - return transport.ParseReference(parts[1]) -} - -// TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when -// the transport is unknown or when the input is invalid. -func TransportFromImageName(imageName string) types.ImageTransport { - // Keep this in sync with ParseImageName! 
- parts := strings.SplitN(imageName, ":", 2) - if len(parts) == 2 { - return transports.Get(parts[0]) - } - return nil -} diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon.go deleted file mode 100644 index d3fc18b2c..000000000 --- a/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !containers_image_docker_daemon_stub - -package alltransports - -import ( - // Register the docker-daemon transport - _ "github.com/containers/image/v4/docker/daemon" -) diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon_stub.go deleted file mode 100644 index 82e055377..000000000 --- a/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon_stub.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build containers_image_docker_daemon_stub - -package alltransports - -import "github.com/containers/image/v4/transports" - -func init() { - transports.Register(transports.NewStubTransport("docker-daemon")) -} diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/ostree.go b/vendor/github.com/containers/image/v4/transports/alltransports/ostree.go deleted file mode 100644 index 891696616..000000000 --- a/vendor/github.com/containers/image/v4/transports/alltransports/ostree.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build containers_image_ostree,linux - -package alltransports - -import ( - // Register the ostree transport - _ "github.com/containers/image/v4/ostree" -) diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/v4/transports/alltransports/ostree_stub.go deleted file mode 100644 index 892518d5c..000000000 --- a/vendor/github.com/containers/image/v4/transports/alltransports/ostree_stub.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !containers_image_ostree !linux - -package alltransports - -import "github.com/containers/image/v4/transports" - -func init() { - transports.Register(transports.NewStubTransport("ostree")) -} diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/storage.go b/vendor/github.com/containers/image/v4/transports/alltransports/storage.go deleted file mode 100644 index 96f3e17fc..000000000 --- a/vendor/github.com/containers/image/v4/transports/alltransports/storage.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !containers_image_storage_stub - -package alltransports - -import ( - // Register the storage transport - _ "github.com/containers/image/v4/storage" -) diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/v4/transports/alltransports/storage_stub.go deleted file mode 100644 index 14c942116..000000000 --- a/vendor/github.com/containers/image/v4/transports/alltransports/storage_stub.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build containers_image_storage_stub - -package alltransports - -import "github.com/containers/image/v4/transports" - -func init() { - transports.Register(transports.NewStubTransport("containers-storage")) -} diff --git a/vendor/github.com/containers/image/v4/transports/stub.go b/vendor/github.com/containers/image/v4/transports/stub.go deleted file mode 100644 index e3ee62031..000000000 --- a/vendor/github.com/containers/image/v4/transports/stub.go +++ /dev/null @@ -1,36 +0,0 
@@ -package transports - -import ( - "fmt" - - "github.com/containers/image/v4/types" -) - -// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. -type stubTransport string - -// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. -func NewStubTransport(name string) types.ImageTransport { - return stubTransport(name) -} - -// Name returns the name of the transport, which must be unique among other transports. -func (s stubTransport) Name() string { - return string(s) -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) { - return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s)) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error { - // Allowing any reference in here allows tools with some transports stubbed-out to still - // use signature verification policies which refer to these stubbed-out transports. - // See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON . - return nil -} diff --git a/vendor/github.com/containers/image/v4/transports/transports.go b/vendor/github.com/containers/image/v4/transports/transports.go deleted file mode 100644 index 8bdb46b4b..000000000 --- a/vendor/github.com/containers/image/v4/transports/transports.go +++ /dev/null @@ -1,90 +0,0 @@ -package transports - -import ( - "fmt" - "sort" - "sync" - - "github.com/containers/image/v4/types" -) - -// knownTransports is a registry of known ImageTransport instances. -type knownTransports struct { - transports map[string]types.ImageTransport - mu sync.Mutex -} - -func (kt *knownTransports) Get(k string) types.ImageTransport { - kt.mu.Lock() - t := kt.transports[k] - kt.mu.Unlock() - return t -} - -func (kt *knownTransports) Remove(k string) { - kt.mu.Lock() - delete(kt.transports, k) - kt.mu.Unlock() -} - -func (kt *knownTransports) Add(t types.ImageTransport) { - kt.mu.Lock() - defer kt.mu.Unlock() - name := t.Name() - if t := kt.transports[name]; t != nil { - panic(fmt.Sprintf("Duplicate image transport name %s", name)) - } - kt.transports[name] = t -} - -var kt *knownTransports - -func init() { - kt = &knownTransports{ - transports: make(map[string]types.ImageTransport), - } -} - -// Get returns the transport specified by name or nil when unavailable. -func Get(name string) types.ImageTransport { - return kt.Get(name) -} - -// Delete deletes a transport from the registered transports. -func Delete(name string) { - kt.Remove(name) -} - -// Register registers a transport. -func Register(t types.ImageTransport) { - kt.Add(t) -} - -// ImageName converts a types.ImageReference into an URL-like image name, which MUST be such that -// ParseImageName(ImageName(reference)) returns an equivalent reference. 
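The registry above is exercised most simply through the stub transport from stub.go; a small sketch (the transport name is invented, and registering a name twice would panic):

package main

import (
	"fmt"

	"github.com/containers/image/v4/transports"
)

func main() {
	// Use a name no real transport claims, since duplicates panic in Add().
	transports.Register(transports.NewStubTransport("example-stub"))

	t := transports.Get("example-stub")
	if _, err := t.ParseReference("whatever"); err != nil {
		// The stub registers and resolves fine, but rejects every reference.
		fmt.Println(err)
	}
}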
-//
-// This is the generally recommended way to refer to images in the UI.
-//
-// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
-func ImageName(ref types.ImageReference) string {
- return ref.Transport().Name() + ":" + ref.StringWithinTransport()
-}
-
-// ListNames returns a list of non-deprecated transport names.
-// Deprecated transports can be used, but are not presented to users.
-func ListNames() []string {
- kt.mu.Lock()
- defer kt.mu.Unlock()
- deprecated := map[string]bool{
- "atomic": true,
- }
- var names []string
- for _, transport := range kt.transports {
- if !deprecated[transport.Name()] {
- names = append(names, transport.Name())
- }
- }
- sort.Strings(names)
- return names
-}
diff --git a/vendor/github.com/containers/image/v4/types/types.go b/vendor/github.com/containers/image/v4/types/types.go
deleted file mode 100644
index af11a2b21..000000000
--- a/vendor/github.com/containers/image/v4/types/types.go
+++ /dev/null
@@ -1,535 +0,0 @@
-package types
-
-import (
- "context"
- "io"
- "time"
-
- "github.com/containers/image/v4/docker/reference"
- compression "github.com/containers/image/v4/pkg/compression/types"
- digest "github.com/opencontainers/go-digest"
- v1 "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// ImageTransport is a top-level namespace for ways to store/load an image.
-// It should generally correspond to ImageSource/ImageDestination implementations.
-//
-// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
-// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS
-// (or, even, IPv4 or IPv6).
-//
-// OTOH all images using the same transport should (apart from versions of the image format) be interoperable.
-// For example, several different ImageTransport implementations may be based on local filesystem paths,
-// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...)
-//
-// See also transports.KnownTransports.
-type ImageTransport interface {
- // Name returns the name of the transport, which must be unique among other transports.
- Name() string
- // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
- ParseReference(reference string) (ImageReference, error)
- // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
- // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
- // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
- // scope passed to this function will not be "", that value is always allowed.
- ValidatePolicyConfigurationScope(scope string) error
-}
-
-// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport.
-//
-// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening
-// within an ImageTransport.ParseReference() or equivalent API creating the reference object.
-// That's also why the various identification/formatting methods of this type do not support returning errors. -// -// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside -// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. -type ImageReference interface { - Transport() ImageTransport - // StringWithinTransport returns a string representation of the reference, which MUST be such that - // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. - // NOTE: The returned string is not promised to be equal to the original input to ParseReference; - // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. - // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; - // instead, see transports.ImageName(). - StringWithinTransport() string - - // DockerReference returns a Docker reference associated with this reference - // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, - // not e.g. after redirect or alias processing), or nil if unknown/not applicable. - DockerReference() reference.Named - - // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. - // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; - // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical - // (i.e. various references with exactly the same semantics should return the same configuration identity) - // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but - // not required/guaranteed that it will be a valid input to Transport().ParseReference(). - // Returns "" if configuration identities for these references are not supported. - PolicyConfigurationIdentity() string - - // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search - // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed - // in order, terminating on first match, and an implicit "" is always checked at the end. - // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), - // and each following element to be a prefix of the element preceding it. - PolicyConfigurationNamespaces() []string - - // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. - // The caller must call .Close() on the returned ImageCloser. - // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, - // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. - // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. - NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error) - // NewImageSource returns a types.ImageSource for this reference. - // The caller must call .Close() on the returned ImageSource. - NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error) - // NewImageDestination returns a types.ImageDestination for this reference. - // The caller must call .Close() on the returned ImageDestination. 
- NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error) - - // DeleteImage deletes the named image from the registry, if supported. - DeleteImage(ctx context.Context, sys *SystemContext) error -} - -// LayerCompression indicates if layers must be compressed, decompressed or preserved -type LayerCompression int - -const ( - // PreserveOriginal indicates the layer must be preserved, i.e. - // no compression or decompression. - PreserveOriginal LayerCompression = iota - // Decompress indicates the layer must be decompressed - Decompress - // Compress indicates the layer must be compressed - Compress -) - -// BlobInfo collects known information about a blob (layer/config). -// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that. -type BlobInfo struct { - Digest digest.Digest // "" if unknown. - Size int64 // -1 if unknown - URLs []string - Annotations map[string]string - MediaType string - // CompressionOperation is used in Image.UpdateLayerInfos to instruct - // whether the original layer should be preserved or (de)compressed. The - // field defaults to preserve the original layer. - CompressionOperation LayerCompression - // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct - // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be - // set when `CompressionOperation == Compress`. - CompressionAlgorithm *compression.Algorithm -} - -// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present. -// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest). -// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable. -// -// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different -// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility, -// at least by not failing hard when encountering unknown data. -type BICTransportScope struct { - Opaque string -} - -// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope. -// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob -// can look it up using BlobInfoCache.CandidateLocations. -// -// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different -// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility, -// at least by not failing hard when encountering unknown data. -type BICLocationReference struct { - Opaque string -} - -// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations. -type BICReplacementCandidate struct { - Digest digest.Digest - Location BICLocationReference -} - -// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies. -// -// It records two kinds of data: -// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs: -// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest. 
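To make the CompressionOperation/CompressionAlgorithm contract concrete, here is a hedged sketch of a BlobInfo that asks Image.UpdateLayerInfos to gzip a layer. gzipAlgo is assumed to come from the pkg/compression package (e.g. via compression.AlgorithmByName("gzip"), as the v5 copy code later in this patch does); the media-type constant is from the manifest package:

    func gzipLayerInfo(layerDigest digest.Digest, gzipAlgo compression.Algorithm) types.BlobInfo {
        return types.BlobInfo{
            Digest:               layerDigest,
            Size:                 -1, // unknown until the blob is streamed
            MediaType:            manifest.DockerV2Schema2LayerMediaType,
            CompressionOperation: types.Compress,
            // MUST be set whenever CompressionOperation == Compress.
            CompressionAlgorithm: &gzipAlgo,
        }
    }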
-// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression), -// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload). -// -// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known -// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value). -// -// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently -// compress/decompress blobs for their own purposes. -// -// - Known blob locations, managed by individual transports: -// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob), -// recording transport-specific information that allows the transport to reuse the blob in the future; -// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused. -// -// Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs -// can be directly reused within a registry, or mounted across registries within a registry server.) -// -// None of the methods return an error indication: errors when reading from, or writing to, the cache should not be fatal; -// users of the cache should just fall back to copying the blobs the usual way. -type BlobInfoCache interface { - // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. - // May return anyDigest if it is known to be uncompressed. - // Returns "" if nothing is known about the digest (it may be compressed or uncompressed). - UncompressedDigest(anyDigest digest.Digest) digest.Digest - // RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest. - // It’s allowed for anyDigest == uncompressed. - // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. - // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. - // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) - RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) - - // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, - // and can be reused given the opaque location data. - RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference) - // CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused - // within the specified (transport, scope) (if they still exist, which is not guaranteed). - // - // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, - // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same - // uncompressed digest. 
- CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate -} - -// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list). -// This is primarily useful for copying images around; for examining their properties, Image (below) -// is usually more useful. -// Each ImageSource should eventually be closed by calling Close(). -// -// WARNING: Various methods which return an object identified by digest generally do not -// validate that the returned data actually matches that digest; this is the caller’s responsibility. -type ImageSource interface { - // Reference returns the reference used to set up this source, _as specified by the user_ - // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. - Reference() ImageReference - // Close removes resources associated with an initialized ImageSource, if any. - Close() error - // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). - // It may use a remote (= slow) service. - // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); - // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). - GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). - // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. - // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. - GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) - // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. - HasThreadSafeGetBlob() bool - // GetSignatures returns the image's signatures. It may use a remote (= slow) service. - // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for - // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list - // (e.g. if the source never returns manifest lists). - GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) - // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. - // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfosForCopy(ctx context.Context) ([]BlobInfo, error) -} - -// ImageDestination is a service, possibly remote (= slow), to store components of a single image. 
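The Close() discipline documented on ImageSource is easy to get wrong. A minimal, hedged sketch of reading the primary manifest through this interface, using only the method signatures shown above:

    func fetchManifest(ctx context.Context, ref types.ImageReference, sys *types.SystemContext) ([]byte, string, error) {
        src, err := ref.NewImageSource(ctx, sys)
        if err != nil {
            return nil, "", err
        }
        defer src.Close() // every ImageSource must eventually be closed
        // A nil instanceDigest selects the primary manifest, which may be a list.
        return src.GetManifest(ctx, nil)
    }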
-// -// There is a specific required order for some of the calls: -// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) -// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents) -// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist. -// -// Each ImageDestination should eventually be closed by calling Close(). -type ImageDestination interface { - // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, - // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. - Reference() ImageReference - // Close removes resources associated with an initialized ImageDestination, if any. - Close() error - - // SupportedManifestMIMETypes tells which manifest MIME types the destination supports. - // If an empty slice or nil is returned, then any MIME type can be tried to upload. - SupportedManifestMIMETypes() []string - // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. - // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. - SupportsSignatures(ctx context.Context) error - // DesiredLayerCompression indicates the kind of compression to apply on layers - DesiredLayerCompression() LayerCompression - // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually - // uploaded to the image destination, true otherwise. - AcceptsForeignLayerURLs() bool - // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. - MustMatchRuntimeOS() bool - // IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), - // and would prefer to receive an unmodified manifest instead of one modified for the destination. - // Does not make a difference if Reference().DockerReference() is nil. - IgnoresEmbeddedDockerReference() bool - - // PutBlob writes contents of stream and returns data representing the result. - // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. - // inputInfo.Size is the expected length of stream, if known. - // inputInfo.MediaType describes the blob format, if known. - // May update cache. - // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available - // to any other readers for download using the supplied digest. - // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. - PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error) - // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. - HasThreadSafePutBlob() bool - // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination - // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). - // info.Digest must not be empty. 
- // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. - // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. - // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. - // May use and/or update cache. - TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error) - // PutManifest writes manifest to the destination. - // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. - // If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema), - // and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. - PutManifest(ctx context.Context, manifest []byte) error - PutSignatures(ctx context.Context, signatures [][]byte) error - // Commit marks the process of storing the image as successful and asks for the image to be persisted. - // WARNING: This does not have any transactional semantics: - // - Uploaded data MAY be visible to others before Commit() is called - // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) - Commit(ctx context.Context) error -} - -// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available, -// refuses specifically this manifest type, but may accept a different manifest type. -type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise. - Err error -} - -func (e ManifestTypeRejectedError) Error() string { - return e.Err.Error() -} - -// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs. -// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them, -// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else. -// This also makes the UnparsedImage→Image conversion an explicitly visible step. -// -// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. -// -// The UnparsedImage must not be used after the underlying ImageSource is Close()d. -type UnparsedImage interface { - // Reference returns the reference used to set up this source, _as specified by the user_ - // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. - Reference() ImageReference - // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. - Manifest(ctx context.Context) ([]byte, string, error) - // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. - Signatures(ctx context.Context) ([][]byte, error) -} - -// Image is the primary API for inspecting properties of images. -// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. 
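ManifestTypeRejectedError is deliberately a struct so callers can type-assert on it, the same pattern this bump applies to docker.ErrUnauthorizedForCredentials in `podman login`. A hedged sketch against the v4 signature shown here (v5's PutManifest gains an instanceDigest parameter, as the copy code later in this patch shows):

    func putManifestWithFallback(ctx context.Context, dest types.ImageDestination, m []byte) error {
        err := dest.PutManifest(ctx, m)
        if _, rejected := err.(types.ManifestTypeRejectedError); rejected {
            // The destination is reachable but refuses this manifest type;
            // convert m to one of dest.SupportedManifestMIMETypes() and retry
            // (conversion elided in this sketch).
        }
        return err
    }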
-// -// The Image must not be used after the underlying ImageSource is Close()d. -type Image interface { - // Note that Reference may return nil in the return value of UpdatedImage! - UnparsedImage - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. - ConfigInfo() BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise. - // The result is cached; it is OK to call this however often you need. - ConfigBlob(context.Context) ([]byte, error) - // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about - // layers in the resulting configuration isn't guaranteed to be returned due to how - // old image manifests work (docker v2s1 especially). - OCIConfig(context.Context) (*v1.Image, error) - // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []BlobInfo - // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. - // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfosForCopy(context.Context) ([]BlobInfo, error) - // EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. - // It returns false if the manifest does not embed a Docker reference. - // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) - EmbeddedDockerReferenceConflicts(ref reference.Named) bool - // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. - Inspect(context.Context) (*ImageInspectInfo, error) - // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. - // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive - // (most importantly it forces us to download the full layers even if they are already present at the destination). - UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool - // UpdatedImage returns a types.Image modified according to options. - // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired. - // This does not change the state of the original Image object. - UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error) - // Size returns an approximation of the amount of disk space which is consumed by the image in its current - // location. If the size is not known, -1 will be returned. - Size() (int64, error) -} - -// ImageCloser is an Image with a Close() method which must be called by the user. -// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource, -// to ensure that the ImageSource is closed. 
-type ImageCloser interface { - Image - // Close removes resources associated with an initialized ImageCloser. - Close() error -} - -// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage. -type ManifestUpdateOptions struct { - LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. - EmbeddedDockerReference reference.Named - ManifestMIMEType string - // The values below are NOT requests to modify the image; they provide optional context which may or may not be used. - InformationOnly ManifestUpdateInformation -} - -// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here -// only to make writing struct literals possible. -type ManifestUpdateInformation struct { - Destination ImageDestination // and yes, UpdatedManifest may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) - LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers) - LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order. -} - -// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration. -// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported -// for other manifest types. -type ImageInspectInfo struct { - Tag string - Created *time.Time - DockerVersion string - Labels map[string]string - Architecture string - Os string - Layers []string - Env []string -} - -// DockerAuthConfig contains authorization information for connecting to a registry. -// The values of Username and Password can be empty when accessing the registry anonymously. -type DockerAuthConfig struct { - Username string - Password string -} - -// OptionalBool is a boolean with an additional undefined value, which is meant -// to be used in the context of user input to distinguish between a -// user-specified value and a default value. -type OptionalBool byte - -const ( - // OptionalBoolUndefined indicates that the OptionalBool hasn't been set. - OptionalBoolUndefined OptionalBool = iota - // OptionalBoolTrue represents the boolean true. - OptionalBoolTrue - // OptionalBoolFalse represents the boolean false. - OptionalBoolFalse -) - -// NewOptionalBool converts the input bool into either OptionalBoolTrue or -// OptionalBoolFalse. The function is meant to avoid boilerplate in callers. -func NewOptionalBool(b bool) OptionalBool { - o := OptionalBoolFalse - if b { - o = OptionalBoolTrue - } - return o -} - -// SystemContext allows parameterizing access to implicitly-accessed resources, -// like configuration files in /etc and users' login state in their home directory. -// Various components can share the same field only if their semantics is exactly -// the same; if in doubt, add a new field. -// It is always OK to pass nil instead of a SystemContext. -type SystemContext struct { - // If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/). - // Not used for any of the more specific path overrides available in this struct. - // Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it). 
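OptionalBool exists so CLI code can tell "flag left unset" apart from "flag explicitly false". A hedged sketch of the usual pattern, using the SystemContext field documented below (tlsVerifySet/tlsVerify stand in for hypothetical values parsed from a command line):

    func systemContextFor(tlsVerifySet, tlsVerify bool) *types.SystemContext {
        sys := &types.SystemContext{}
        if tlsVerifySet {
            // Note the inversion: the user-facing flag is tls-verify, while
            // the field is DockerInsecureSkipTLSVerify.
            sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify)
        }
        // If the flag was never set, the field stays OptionalBoolUndefined
        // and per-registry configuration decides.
        return sys
    }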
- // NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths, - // and there is no need to worry about the environment.) - // NOTE: This does NOT affect paths starting with $HOME. - RootForImplicitAbsolutePaths string - - // === Global configuration overrides === - // If not "", overrides the system's default path for signature.Policy configuration. - SignaturePolicyPath string - // If not "", overrides the system's default path for registries.d (Docker signature storage configuration) - RegistriesDirPath string - // Path to the system-wide registries configuration file - SystemRegistriesConfPath string - // If not "", overrides the default path for the authentication file - AuthFilePath string - // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match. - ArchitectureChoice string - // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match. - OSChoice string - // If not "", overrides the system's default directory containing a blob info cache. - BlobInfoCacheDir string - - // Additional tags when creating or copying a docker-archive. - DockerArchiveAdditionalTags []reference.NamedTagged - - // === OCI.Transport overrides === - // If not "", a directory containing a CA certificate (ending with ".crt"), - // a client certificate (ending with ".cert") and a client certificate key - // (ending with ".key") used when downloading OCI image layers. - OCICertPath string - // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. - OCIInsecureSkipTLSVerify bool - // If not "", use a shared directory for storing blobs rather than within OCI layouts - OCISharedBlobDirPath string - // Allow storing uncompressed image layers within the OCI image layout - OCIAcceptUncompressedLayers bool - - // === docker.Transport overrides === - // If not "", a directory containing a CA certificate (ending with ".crt"), - // a client certificate (ending with ".cert") and a client certificate key - // (ending with ".key") used when talking to a Docker Registry. - DockerCertPath string - // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above. - // Ignored if DockerCertPath is non-empty. - DockerPerHostCertDirPath string - // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. - DockerInsecureSkipTLSVerify OptionalBool - // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials - DockerAuthConfig *DockerAuthConfig - // if not "", a User-Agent header is added to each request when contacting a registry. - DockerRegistryUserAgent string - // if true, a V1 ping attempt isn't done to give users a better error. Default is false. - // Note that this field is used mainly to integrate containers/image into projectatomic/docker - // in order to not break any existing docker integration tests. - DockerDisableV1Ping bool - // Directory to use for OSTree temporary files - OSTreeTmpDirPath string - - // === docker/daemon.Transport overrides === - // A directory containing a CA certificate (ending with ".crt"), - // a client certificate (ending with ".cert") and a client certificate key - // (ending with ".key") used when talking to a Docker daemon. 
- DockerDaemonCertPath string - // The hostname or IP to the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed. - DockerDaemonHost string - // Used to skip TLS verification, off by default. To take effect DockerDaemonCertPath needs to be specified as well. - DockerDaemonInsecureSkipTLSVerify bool - - // === dir.Transport overrides === - // DirForceCompress compresses the image layers if set to true - DirForceCompress bool - - // CompressionFormat is the format to use for the compression of the blobs - CompressionFormat *compression.Algorithm - // CompressionLevel specifies what compression level is used - CompressionLevel *int -} - -// ProgressProperties is used to pass information from the copy code to a monitor which -// can use the real-time information to produce output or react to changes. -type ProgressProperties struct { - Artifact BlobInfo - Offset uint64 -} diff --git a/vendor/github.com/containers/image/v4/version/version.go b/vendor/github.com/containers/image/v4/version/version.go deleted file mode 100644 index 2fa6706df..000000000 --- a/vendor/github.com/containers/image/v4/version/version.go +++ /dev/null @@ -1,18 +0,0 @@ -package version - -import "fmt" - -const ( - // VersionMajor is for an API incompatible changes - VersionMajor = 4 - // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 0 - // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 1 - - // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" -) - -// Version is the specification version that the package types support. -var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/containers/image/v5/LICENSE b/vendor/github.com/containers/image/v5/LICENSE new file mode 100644 index 000000000..953563530 --- /dev/null +++ b/vendor/github.com/containers/image/v5/LICENSE @@ -0,0 +1,189 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go new file mode 100644 index 000000000..090d862d5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -0,0 +1,1233 @@ +package copy + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbauerster/mpb" + "github.com/vbauerster/mpb/decor" + "golang.org/x/crypto/ssh/terminal" + "golang.org/x/sync/semaphore" +) + +type digestingReader struct { + source io.Reader + digester digest.Digester + expectedDigest digest.Digest + validationFailed bool + validationSucceeded bool +} + +// maxParallelDownloads is used to limit the maximum number of parallel +// downloads. Let's follow Firefox by limiting it to 6. 
+var maxParallelDownloads = 6 + +// compressionBufferSize is the buffer size used to compress a blob +var compressionBufferSize = 1048576 + +// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error +// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. +// (neither is set if EOF is never reached). +func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { + if err := expectedDigest.Validate(); err != nil { + return nil, errors.Errorf("Invalid digest specification %s", expectedDigest) + } + digestAlgorithm := expectedDigest.Algorithm() + if !digestAlgorithm.Available() { + return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) + } + return &digestingReader{ + source: source, + digester: digestAlgorithm.Digester(), + expectedDigest: expectedDigest, + validationFailed: false, + }, nil +} + +func (d *digestingReader) Read(p []byte) (int, error) { + n, err := d.source.Read(p) + if n > 0 { + if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil { + // Coverage: This should not happen, the hash.Hash interface requires + // d.digester.Hash().Write to never return an error, and the io.Writer interface + // requires n2 == len(input) if no error is returned. + return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n) + } + } + if err == io.EOF { + actualDigest := d.digester.Digest() + if actualDigest != d.expectedDigest { + d.validationFailed = true + return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) + } + d.validationSucceeded = true + } + return n, err +} + +// copier allows us to keep track of diffID values for blobs, and other +// data shared across one or more images in a possible manifest list. +type copier struct { + dest types.ImageDestination + rawSource types.ImageSource + reportWriter io.Writer + progressOutput io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties + blobInfoCache types.BlobInfoCache + copyInParallel bool + compressionFormat compression.Algorithm + compressionLevel *int +} + +// imageCopier tracks state specific to a single image (possibly an item of a manifest list) +type imageCopier struct { + c *copier + manifestUpdates *types.ManifestUpdateOptions + src types.Image + diffIDsAreNeeded bool + canModifyManifest bool + canSubstituteBlobs bool +} + +const ( + // CopySystemImage is the default value which, when set in + // Options.ImageListSelection, indicates that the caller expects only one + // image to be copied, so if the source reference refers to a list of + // images, one that matches the current system will be selected. + CopySystemImage ImageListSelection = iota + // CopyAllImages is a value which, when set in Options.ImageListSelection, + // indicates that the caller expects to copy multiple images, and if + // the source reference refers to a list, that the list and every image + // to which it refers will be copied. If the source reference refers + // to a list and the target reference can not accept lists, an error + // should be returned. 
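digestingReader is unexported; outside package copy the same verify-while-reading idea can be built from go-digest directly. A hedged sketch ("fmt", "io", and the go-digest module are assumed):

    func verifyBlob(r io.Reader, expected digest.Digest) error {
        if err := expected.Validate(); err != nil {
            return err
        }
        digester := expected.Algorithm().Digester()
        // Hash the stream as it is consumed, then compare at the end,
        // mirroring what digestingReader does at io.EOF.
        if _, err := io.Copy(digester.Hash(), r); err != nil {
            return err
        }
        if actual := digester.Digest(); actual != expected {
            return fmt.Errorf("digest did not match, expected %s, got %s", expected, actual)
        }
        return nil
    }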
+ CopyAllImages + // CopySpecificImages is a value which, when set in + // Options.ImageListSelection, indicates that the caller expects the + // source reference to be either a single image or a list of images, + // and if the source reference is a list, wants only specific instances + // from it copied (or none of them, if the list of instances to copy is + // empty), along with the list itself. If the target reference can + // only accept one image (i.e., it cannot accept lists), an error + // should be returned. + CopySpecificImages +) + +// ImageListSelection is one of CopySystemImage, CopyAllImages, or +// CopySpecificImages, to control whether, when the source reference is a list, +// copy.Image() copies only an image which matches the current runtime +// environment, or all images which match the supplied reference, or only +// specific images from the source reference. +type ImageListSelection int + +// Options allows supplying non-default configuration modifying the behavior of Image. +type Options struct { + RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. + SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), + ReportWriter io.Writer + SourceCtx *types.SystemContext + DestinationCtx *types.SystemContext + ProgressInterval time.Duration // time to wait between reports to signal the progress channel + Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. + // manifest MIME type of the image set by the user. "" is the default and means use autodetection to determine the manifest MIME type + ForceManifestMIMEType string + ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list + Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself +} + +// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value +func validateImageListSelection(selection ImageListSelection) error { + switch selection { + case CopySystemImage, CopyAllImages, CopySpecificImages: + return nil + default: + return errors.Errorf("Invalid value for options.ImageListSelection: %d", selection) + } +} + +// Image copies an image from srcRef to destRef, using policyContext to validate +// source image admissibility. It returns the manifest which was written to +// the new copy of the image. +func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) { + // NOTE this function uses an output parameter for the error return value. + // Setting this and returning is the ideal way to return an error. + // + // the defers in this routine will wrap the error return with their own errors + // which can be valuable context in the middle of a multi-streamed copy. 
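Putting Options and the entry point together, a minimal caller might look like the following sketch (policyContext construction via the signature package is elided, instanceDigest is assumed known, and "os" plus the copy/types/digest packages are assumed imported):

    func copyOneInstance(ctx context.Context, policyContext *signature.PolicyContext, srcRef, destRef types.ImageReference, instanceDigest digest.Digest) ([]byte, error) {
        // Returns the manifest bytes as written to the destination.
        return copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
            ReportWriter:       os.Stdout,
            ImageListSelection: copy.CopySpecificImages,
            Instances:          []digest.Digest{instanceDigest},
        })
    }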
+ if options == nil { + options = &Options{} + } + + if err := validateImageListSelection(options.ImageListSelection); err != nil { + return nil, err + } + + reportWriter := ioutil.Discard + + if options.ReportWriter != nil { + reportWriter = options.ReportWriter + } + + dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) + if err != nil { + return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef)) + } + defer func() { + if err := dest.Close(); err != nil { + retErr = errors.Wrapf(retErr, " (dest: %v)", err) + } + }() + + rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) + if err != nil { + return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef)) + } + defer func() { + if err := rawSource.Close(); err != nil { + retErr = errors.Wrapf(retErr, " (src: %v)", err) + } + }() + + // If reportWriter is not a TTY (e.g., when piping to a file), do not + // print the progress bars to avoid long and hard to parse output. + // createProgressBar() will print a single line instead. + progressOutput := reportWriter + if !isTTY(reportWriter) { + progressOutput = ioutil.Discard + } + copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() + + c := &copier{ + dest: dest, + rawSource: rawSource, + reportWriter: reportWriter, + progressOutput: progressOutput, + progressInterval: options.ProgressInterval, + progress: options.Progress, + copyInParallel: copyInParallel, + // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. + // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually + // we might want to add a separate CommonCtx — or would that be too confusing? + blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx), + } + // Default to using gzip compression unless specified otherwise. + if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil { + algo, err := compression.AlgorithmByName("gzip") + if err != nil { + return nil, err + } + c.compressionFormat = algo + } else { + c.compressionFormat = *options.DestinationCtx.CompressionFormat + } + if options.DestinationCtx != nil { + // Note that the compressionLevel can be nil. + c.compressionLevel = options.DestinationCtx.CompressionLevel + } + + unparsedToplevel := image.UnparsedInstance(rawSource, nil) + multiImage, err := isMultiImage(ctx, unparsedToplevel) + if err != nil { + return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef)) + } + + if !multiImage { + // The simple case: just copy a single image. + if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedToplevel, nil); err != nil { + return nil, err + } + } else if options.ImageListSelection == CopySystemImage { + // This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that + // matches the current system to copy, and copy it. 
+ mfest, manifestType, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, errors.Wrapf(err, "Error reading manifest for %s", transports.ImageName(srcRef)) + } + manifestList, err := manifest.ListFromBlob(mfest, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "Error parsing primary manifest as list for %s", transports.ImageName(srcRef)) + } + instanceDigest, err := manifestList.ChooseInstance(options.SourceCtx) // try to pick one that matches options.SourceCtx + if err != nil { + return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef)) + } + logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest) + unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) + + if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil { + return nil, err + } + } else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */ + // If we were asked to copy multiple images and can't, that's an error. + if !supportsMultipleImages(c.dest) { + return nil, errors.Errorf("Error copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name()) + } + // Copy some or all of the images. + switch options.ImageListSelection { + case CopyAllImages: + logrus.Debugf("Source is a manifest list; copying all instances") + case CopySpecificImages: + logrus.Debugf("Source is a manifest list; copying some instances") + } + if copiedManifest, _, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil { + return nil, err + } + } + + if err := c.dest.Commit(ctx, unparsedToplevel); err != nil { + return nil, errors.Wrap(err, "Error committing the finished image") + } + + return copiedManifest, nil +} + +// Checks if the destination supports accepting multiple images by checking if it can support +// manifest types that are lists of other manifests. +func supportsMultipleImages(dest types.ImageDestination) bool { + mtypes := dest.SupportedManifestMIMETypes() + if len(mtypes) == 0 { + // Anything goes! + return true + } + for _, mtype := range mtypes { + if manifest.MIMETypeIsMultiImage(mtype) { + return true + } + } + return false +} + +// copyMultipleImages copies some or all of an image list's instances, using +// policyContext to validate source image admissibility. +func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, copiedManifestType string, retErr error) { + // Parse the list and get a copy of the original value after it's re-encoded. + manifestList, manifestType, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, "", errors.Wrapf(err, "Error reading manifest list") + } + list, err := manifest.ListFromBlob(manifestList, manifestType) + if err != nil { + return nil, "", errors.Wrapf(err, "Error parsing manifest list %q", string(manifestList)) + } + originalList := list.Clone() + + // Read and/or clear the set of signatures for this list. 
+ var sigs [][]byte + if options.RemoveSignatures { + sigs = [][]byte{} + } else { + c.Printf("Getting image list signatures\n") + s, err := c.rawSource.GetSignatures(ctx, nil) + if err != nil { + return nil, "", errors.Wrap(err, "Error reading signatures") + } + sigs = s + } + if len(sigs) != 0 { + c.Printf("Checking if image list destination supports signatures\n") + if err := c.dest.SupportsSignatures(ctx); err != nil { + return nil, "", errors.Wrap(err, "Can not copy signatures") + } + } + + // Determine if we'll need to convert the manifest list to a different format. + forceListMIMEType := options.ForceManifestMIMEType + switch forceListMIMEType { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + forceListMIMEType = manifest.DockerV2ListMediaType + case imgspecv1.MediaTypeImageManifest: + forceListMIMEType = imgspecv1.MediaTypeImageIndex + } + selectedListType, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType) + if err != nil { + return nil, "", errors.Wrapf(err, "Error determining manifest list type to write to destination") + } + if selectedListType != list.MIMEType() { + canModifyManifestList := (len(sigs) == 0) + if !canModifyManifestList { + return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType) + } + } + + // Copy each image, or just the ones we want to copy, in turn. + instanceDigests := list.Instances() + imagesToCopy := len(instanceDigests) + if options.ImageListSelection == CopySpecificImages { + imagesToCopy = len(options.Instances) + } + c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests)) + updates := make([]manifest.ListUpdate, len(instanceDigests)) + instancesCopied := 0 + for i, instanceDigest := range instanceDigests { + if options.ImageListSelection == CopySpecificImages { + skip := true + for _, instance := range options.Instances { + if instance == instanceDigest { + skip = false + break + } + } + if skip { + update, err := list.Instance(instanceDigest) + if err != nil { + return nil, "", err + } + logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + // Record the digest/size/type of the manifest that we didn't copy. + updates[i] = update + continue + } + } + logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest) + updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest) + if err != nil { + return nil, "", err + } + instancesCopied++ + // Record the result of a possible conversion here. + update := manifest.ListUpdate{ + Digest: updatedManifestDigest, + Size: int64(len(updatedManifest)), + MediaType: updatedManifestType, + } + updates[i] = update + } + + // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. + if err = list.UpdateInstances(updates); err != nil { + return nil, "", errors.Wrapf(err, "Error updating manifest list") + } + + // Check if the updates meaningfully changed the list of images. 
+	listIsModified := false
+	if !reflect.DeepEqual(list.Instances(), originalList.Instances()) {
+		listIsModified = true
+	}
+
+	// Perform the list conversion.
+	if selectedListType != list.MIMEType() {
+		list, err = list.ConvertToMIMEType(selectedListType)
+		if err != nil {
+			return nil, "", errors.Wrapf(err, "Error converting manifest list to list with MIME type %q", selectedListType)
+		}
+	}
+
+	// If the set of instances was modified, re-encode the list so that the updated contents are what we write out below.
+	if listIsModified {
+		manifestList, err = list.Serialize()
+		if err != nil {
+			return nil, "", errors.Wrapf(err, "Error encoding updated manifest list (%q: %#v)", list.MIMEType(), list.Instances())
+		}
+		logrus.Debugf("Manifest list has been updated")
+	}
+
+	// Save the manifest list.
+	c.Printf("Writing manifest list to image destination\n")
+	if err = c.dest.PutManifest(ctx, manifestList, nil); err != nil {
+		return nil, "", errors.Wrapf(err, "Error writing manifest list %q", string(manifestList))
+	}
+
+	// Sign the manifest list.
+	if options.SignBy != "" {
+		newSig, err := c.createSignature(manifestList, options.SignBy)
+		if err != nil {
+			return nil, "", err
+		}
+		sigs = append(sigs, newSig)
+	}
+
+	c.Printf("Storing list signatures\n")
+	if err := c.dest.PutSignatures(ctx, sigs, nil); err != nil {
+		return nil, "", errors.Wrap(err, "Error writing signatures")
+	}
+
+	return manifestList, selectedListType, nil
+}
+
+// copyOneImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
+// source image admissibility.
+func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) {
+	// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
+	// Make sure we fail cleanly in such cases.
+	multiImage, err := isMultiImage(ctx, unparsedImage)
+	if err != nil {
+		// FIXME FIXME: How to name a reference for the sub-image?
+		return nil, "", "", errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
+	}
+	if multiImage {
+		return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
+	}
+
+	// Please keep this policy check BEFORE reading any other information about the image.
+	// (The multiImage check above only matches the MIME type, which we have received anyway.
+	// Actual parsing of anything should be deferred.)
+	if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
+		return nil, "", "", errors.Wrap(err, "Source image rejected")
+	}
+	src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
+	if err != nil {
+		return nil, "", "", errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
+	}
+
+	// If the destination is a digested reference, make a note of that, determine what digest value we're
+	// expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's
+	// one item from a manifest list that matches it, accept that as a match.
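The block that follows implements this check with a type assertion against reference.Digested. A self-contained sketch of that assertion; the repository name and the all-zero digest are made up:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// A hypothetical digest-pinned reference (the digest value is fabricated).
	named, err := reference.ParseNormalizedNamed(
		"example.com/app@sha256:0000000000000000000000000000000000000000000000000000000000000000")
	if err != nil {
		panic(err)
	}
	// The same assertion used in copyOneImage: a reference "is digested"
	// iff it implements reference.Digested.
	if digested, ok := named.(reference.Digested); ok {
		fmt.Println("pinned to", digested.Digest())
	} else {
		fmt.Println("not digest-pinned")
	}
}
```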
+ destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + sourceManifest, _, err := src.Manifest(ctx) + if err != nil { + return nil, "", "", errors.Wrapf(err, "Error reading manifest from source image") + } + matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest()) + if err != nil { + return nil, "", "", errors.Wrapf(err, "Error computing digest of source image's manifest") + } + if !matches { + manifestList, _, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, "", "", errors.Wrapf(err, "Error reading manifest from source image") + } + matches, err = manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, "", "", errors.Wrapf(err, "Error computing digest of source image's manifest") + } + if !matches { + return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + } + + if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil { + return nil, "", "", err + } + + var sigs [][]byte + if options.RemoveSignatures { + sigs = [][]byte{} + } else { + c.Printf("Getting image source signatures\n") + s, err := src.Signatures(ctx) + if err != nil { + return nil, "", "", errors.Wrap(err, "Error reading signatures") + } + sigs = s + } + if len(sigs) != 0 { + c.Printf("Checking if image destination supports signatures\n") + if err := c.dest.SupportsSignatures(ctx); err != nil { + return nil, "", "", errors.Wrap(err, "Can not copy signatures") + } + } + + ic := imageCopier{ + c: c, + manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, + src: src, + // diffIDsAreNeeded is computed later + canModifyManifest: len(sigs) == 0 && !destIsDigestedReference, + } + // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. + // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: + // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. + // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk + // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, + // and we would reuse and sign it. + ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == "" + + if err := ic.updateEmbeddedDockerReference(); err != nil { + return nil, "", "", err + } + + // We compute preferredManifestMIMEType only to show it in error messages. + // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. + preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType) + if err != nil { + return nil, "", "", err + } + + // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. 
+ ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) + + if err := ic.copyLayers(ctx); err != nil { + return nil, "", "", err + } + + // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; + // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support + // without actually trying to upload something and getting a types.ManifestTypeRejectedError. + // So, try the preferred manifest MIME type. If the process succeeds, fine… + manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) + retManifestType = preferredManifestMIMEType + if err != nil { + logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) + // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options. + if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 { + // We don’t have other options. + // In principle the code below would handle this as well, but the resulting error message is fairly ugly. + // Don’t bother the user with MIME types if we have no choice. + return nil, "", "", err + } + // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. + // So if we are here, we will definitely be trying to convert the manifest. + // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, + // so let’s bail out early and with a better error message. + if !ic.canModifyManifest { + return nil, "", "", errors.Wrap(err, "Writing manifest failed (and converting it is not possible)") + } + + // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. + errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} + for _, manifestMIMEType := range otherManifestMIMETypeCandidates { + logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) + ic.manifestUpdates.ManifestMIMEType = manifestMIMEType + attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) + if err != nil { + logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) + continue + } + + // We have successfully uploaded a manifest. + manifestBytes = attemptedManifest + retManifestDigest = attemptedManifestDigest + retManifestType = manifestMIMEType + errs = nil // Mark this as a success so that we don't abort below. + break + } + if errs != nil { + return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + } + + if options.SignBy != "" { + newSig, err := c.createSignature(manifestBytes, options.SignBy) + if err != nil { + return nil, "", "", err + } + sigs = append(sigs, newSig) + } + + c.Printf("Storing signatures\n") + if err := c.dest.PutSignatures(ctx, sigs, targetInstance); err != nil { + return nil, "", "", errors.Wrap(err, "Error writing signatures") + } + + return manifestBytes, retManifestType, retManifestDigest, nil +} + +// Printf writes a formatted string to c.reportWriter. 
+// Note that the method name Printf is not entirely arbitrary: (go tool vet) +// has a built-in list of functions/methods (whatever object they are for) +// which have their format strings checked; for other names we would have +// to pass a parameter to every (go tool vet) invocation. +func (c *copier) Printf(format string, a ...interface{}) { + fmt.Fprintf(c.reportWriter, format, a...) +} + +func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error { + if dest.MustMatchRuntimeOS() { + wantedOS := runtime.GOOS + if sys != nil && sys.OSChoice != "" { + wantedOS = sys.OSChoice + } + c, err := src.OCIConfig(ctx) + if err != nil { + return errors.Wrapf(err, "Error parsing image configuration") + } + osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS) + if wantedOS == "windows" && c.OS == "linux" { + return osErr + } else if wantedOS != "windows" && c.OS == "windows" { + return osErr + } + } + return nil +} + +// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. +func (ic *imageCopier) updateEmbeddedDockerReference() error { + if ic.c.dest.IgnoresEmbeddedDockerReference() { + return nil // Destination would prefer us not to update the embedded reference. + } + destRef := ic.c.dest.Reference().DockerReference() + if destRef == nil { + return nil // Destination does not care about Docker references + } + if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { + return nil // No reference embedded in the manifest, or it matches destRef already. + } + + if !ic.canModifyManifest { + return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway", + transports.ImageName(ic.c.dest.Reference()), destRef.String()) + } + ic.manifestUpdates.EmbeddedDockerReference = destRef + return nil +} + +// isTTY returns true if the io.Writer is a file and a tty. +func isTTY(w io.Writer) bool { + if f, ok := w.(*os.File); ok { + return terminal.IsTerminal(int(f.Fd())) + } + return false +} + +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +func (ic *imageCopier) copyLayers(ctx context.Context) error { + srcInfos := ic.src.LayerInfos() + numLayers := len(srcInfos) + updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) + if err != nil { + return err + } + srcInfosUpdated := false + if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { + if !ic.canModifyManifest { + return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden") + } + srcInfos = updatedSrcInfos + srcInfosUpdated = true + } + + type copyLayerData struct { + destInfo types.BlobInfo + diffID digest.Digest + err error + } + + // copyGroup is used to determine if all layers are copied + copyGroup := sync.WaitGroup{} + copyGroup.Add(numLayers) + + // copySemaphore is used to limit the number of parallel downloads to + // avoid malicious images causing troubles and to be nice to servers. 
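The code just below gates the per-layer goroutines with a golang.org/x/sync/semaphore.Weighted joined by a sync.WaitGroup. Reduced to a standalone sketch; the limit of 3 and the worker body are placeholders standing in for maxParallelDownloads and the real layer copy:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(3) // at most 3 workers at once, like maxParallelDownloads
	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			break // the context was cancelled
		}
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			defer sem.Release(1)
			fmt.Println("copying layer", n) // placeholder for the real copy
		}(i)
	}
	wg.Wait()
}
```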
+ var copySemaphore *semaphore.Weighted + if ic.c.copyInParallel { + copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads)) + } else { + copySemaphore = semaphore.NewWeighted(int64(1)) + } + + data := make([]copyLayerData, numLayers) + copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) { + defer copySemaphore.Release(1) + defer copyGroup.Done() + cld := copyLayerData{} + if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + // DiffIDs are, currently, needed only when converting from schema1. + // In which case src.LayerInfos will not have URLs because schema1 + // does not support them. + if ic.diffIDsAreNeeded { + cld.err = errors.New("getting DiffID for foreign layers is unimplemented") + } else { + cld.destInfo = srcLayer + logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) + } + } else { + cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool) + } + data[index] = cld + } + + func() { // A scope for defer + progressPool, progressCleanup := ic.c.newProgressPool(ctx) + defer progressCleanup() + + for i, srcLayer := range srcInfos { + copySemaphore.Acquire(ctx, 1) + go copyLayerHelper(i, srcLayer, progressPool) + } + + // Wait for all layers to be copied + copyGroup.Wait() + }() + + destInfos := make([]types.BlobInfo, numLayers) + diffIDs := make([]digest.Digest, numLayers) + for i, cld := range data { + if cld.err != nil { + return cld.err + } + destInfos[i] = cld.destInfo + diffIDs[i] = cld.diffID + } + + ic.manifestUpdates.InformationOnly.LayerInfos = destInfos + if ic.diffIDsAreNeeded { + ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs + } + if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { + ic.manifestUpdates.LayerInfos = destInfos + } + return nil +} + +// layerDigestsDiffer return true iff the digests in a and b differ (ignoring sizes and possible other fields) +func layerDigestsDiffer(a, b []types.BlobInfo) bool { + if len(a) != len(b) { + return true + } + for i := range a { + if a[i].Digest != b[i].Digest { + return true + } + } + return false +} + +// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, +// stores the resulting config and manifest to the destination, and returns the stored manifest +// and its digest. +func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { + pendingImage := ic.src + if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) { + if !ic.canModifyManifest { + return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") + } + if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { + // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. + // So, this can only happen if we are trying to upload using one of the other MIME type candidates. + // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise + // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. + // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. 
+		// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
+			return nil, "", errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
+		}
+		pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
+		if err != nil {
+			return nil, "", errors.Wrap(err, "Error creating an updated image manifest")
+		}
+		pendingImage = pi
+	}
+	man, _, err := pendingImage.Manifest(ctx)
+	if err != nil {
+		return nil, "", errors.Wrap(err, "Error reading manifest")
+	}
+
+	if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
+		return nil, "", err
+	}
+
+	ic.c.Printf("Writing manifest to image destination\n")
+	manifestDigest, err := manifest.Digest(man)
+	if err != nil {
+		return nil, "", err
+	}
+	if instanceDigest != nil {
+		instanceDigest = &manifestDigest
+	}
+	if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
+		return nil, "", errors.Wrap(err, "Error writing manifest")
+	}
+	return man, manifestDigest, nil
+}
+
+// newProgressPool creates a *mpb.Progress and a cleanup function.
+// The caller must eventually call the returned cleanup function after the pool will no longer be updated.
+func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
+	ctx, cancel := context.WithCancel(ctx)
+	pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx))
+	return pool, func() {
+		cancel()
+		pool.Wait()
+	}
+}
+
+// createProgressBar creates an mpb.Bar in pool. Note that if the copier's reportWriter
+// is ioutil.Discard, the progress bar's output will be discarded.
+func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
+	// shortDigestLen is the length of the digest used for blobs.
+	const shortDigestLen = 12
+
+	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+	// Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
+	maxPrefixLen := len("Copying blob ") + shortDigestLen
+	if len(prefix) > maxPrefixLen {
+		prefix = prefix[:maxPrefixLen]
+	}
+
+	// Use a normal progress bar when we know the size (i.e., size > 0).
+	// Otherwise, use a spinner to indicate that something's happening.
+	var bar *mpb.Bar
+	if info.Size > 0 {
+		bar = pool.AddBar(info.Size,
+			mpb.BarClearOnComplete(),
+			mpb.PrependDecorators(
+				decor.Name(prefix),
+			),
+			mpb.AppendDecorators(
+				decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
+			),
+		)
+	} else {
+		bar = pool.AddSpinner(info.Size,
+			mpb.SpinnerOnLeft,
+			mpb.BarClearOnComplete(),
+			mpb.SpinnerStyle([]string{".", "..", "...", "....", ""}),
+			mpb.PrependDecorators(
+				decor.Name(prefix),
+			),
+			mpb.AppendDecorators(
+				decor.OnComplete(decor.Name(""), " "+onComplete),
+			),
+		)
+	}
+	if c.progressOutput == ioutil.Discard {
+		c.Printf("Copying %s %s\n", kind, info.Digest)
+	}
+	return bar
+}
+
+// copyConfig copies config.json, if any, from src to dest.
+func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
+	srcInfo := src.ConfigInfo()
+	if srcInfo.Digest != "" {
+		configBlob, err := src.ConfigBlob(ctx)
+		if err != nil {
+			return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
+		}
+
+		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
+			progressPool, progressCleanup := c.newProgressPool(ctx)
+			defer progressCleanup()
+			bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
+			destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
+			if err != nil {
+				return types.BlobInfo{}, err
+			}
+			bar.SetTotal(int64(len(configBlob)), true)
+			return destInfo, nil
+		}()
+		if err != nil {
+			return err
+		}
+		if destInfo.Digest != srcInfo.Digest {
+			return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+		}
+	}
+	return nil
+}
+
+// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
+// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
+type diffIDResult struct {
+	digest digest.Digest
+	err    error
+}
+
+// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it,
+// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded.
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
+	cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+	diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+
+	// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
+ if !diffIDIsNeeded { + reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) + if err != nil { + return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest) + } + if reused { + logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) + bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists") + bar.SetTotal(0, true) + return blobInfo, cachedDiffID, nil + } + } + + // Fallback: copy the layer, computing the diffID if we need to do so + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) + if err != nil { + return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) + } + defer srcStream.Close() + + bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done") + + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, Annotations: srcInfo.Annotations}, diffIDIsNeeded, bar) + if err != nil { + return types.BlobInfo{}, "", err + } + + diffID := cachedDiffID + if diffIDIsNeeded { + select { + case <-ctx.Done(): + return types.BlobInfo{}, "", ctx.Err() + case diffIDResult := <-diffIDChan: + if diffIDResult.err != nil { + return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID") + } + logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + diffID = diffIDResult.digest + } + } + + bar.SetTotal(srcInfo.Size, true) + return blobInfo, diffID, nil +} + +// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. +// it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, +// perhaps compressing the stream if canCompress, +// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. +func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, + diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) { + var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil + var diffIDChan chan diffIDResult + + err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below + if diffIDIsNeeded { + diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block. + pipeReader, pipeWriter := io.Pipe() + defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily. + pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() + }() + + getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer { + // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further + // reading from the pipe has failed, we don’t really care. 
+			// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
+			// the return value includes an error indication, which we do check.
+			//
+			// If this never gets called, pipeReader will not be used anywhere, but pipeWriter will only be
+			// closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
+			go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
+			return pipeWriter
+		}
+	}
+	blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success
+	return blobInfo, diffIDChan, err
+	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
+}
+
+// diffIDComputationGoroutine reads all input from layerStream, uncompresses it using decompressor if necessary, and sends its digest and status, if any, to dest.
+func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) {
+	result := diffIDResult{
+		digest: "",
+		err:    errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
+	}
+	defer func() { dest <- result }()
+	defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.
+
+	result.digest, result.err = computeDiffID(layerStream, decompressor)
+}
+
+// computeDiffID reads all input from stream, uncompresses it using decompressor if necessary, and returns its digest.
+func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) {
+	if decompressor != nil {
+		s, err := decompressor(stream)
+		if err != nil {
+			return "", err
+		}
+		defer s.Close()
+		stream = s
+	}
+
+	return digest.Canonical.FromReader(stream)
+}
+
+// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
+// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
+// perhaps compressing it if canModifyBlob,
+// and returns a complete blobInfo of the copied blob.
+func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
+	getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
+	canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
+	// The copying happens through a pipeline of connected io.Readers.
+	// === Input: srcStream
+
+	// === Process input through digestingReader to validate against the expected digest.
+	// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
+	// use a separate validation failure indicator.
+	// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
+	// dest.PutBlob may detect that the layer already exists, in which case we don't
+	// read stream to the end, and validation does not happen.
+	digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
+	if err != nil {
+		return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
+	}
+	var destStream io.Reader = digestingReader
+
+	// === Detect compression of the input stream.
+	// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompressionFormat.
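As computeDiffID above shows, a DiffID is simply the canonical digest of the uncompressed layer stream. A standalone equivalent, assuming a gzip-compressed input; the sample bytes are fabricated:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	"github.com/opencontainers/go-digest"
)

// diffID hashes the uncompressed contents of a gzipped layer, mirroring
// what computeDiffID does when it is handed the gzip decompressor.
func diffID(layer io.Reader) (digest.Digest, error) {
	gz, err := gzip.NewReader(layer)
	if err != nil {
		return "", err
	}
	defer gz.Close()
	return digest.Canonical.FromReader(gz)
}

func main() {
	// Build a tiny fake "layer": gzip-compressed bytes.
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	w.Write([]byte("layer contents"))
	w.Close()

	d, err := diffID(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println("DiffID:", d) // digest of the *uncompressed* bytes
}
```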
+ compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) + } + isCompressed := decompressor != nil + destStream = bar.ProxyReader(destStream) + + // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. + var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. + if getOriginalLayerCopyWriter != nil { + destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor)) + originalLayerReader = destStream + } + + desiredCompressionFormat := c.compressionFormat + + // === Deal with layer compression/decompression if necessary + var inputInfo types.BlobInfo + var compressionOperation types.LayerCompression + if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { + logrus.Debugf("Compressing blob on the fly") + compressionOperation = types.Compress + pipeReader, pipeWriter := io.Pipe() + defer pipeReader.Close() + + // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, + // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, + // we don’t care. + go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter + destStream = pipeReader + inputInfo.Digest = "" + inputInfo.Size = -1 + } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() { + // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally + // re-compressed using the desired format. + logrus.Debugf("Blob will be converted") + + compressionOperation = types.PreserveOriginal + s, err := decompressor(destStream) + if err != nil { + return types.BlobInfo{}, err + } + defer s.Close() + + pipeReader, pipeWriter := io.Pipe() + defer pipeReader.Close() + + go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter + + destStream = pipeReader + inputInfo.Digest = "" + inputInfo.Size = -1 + } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { + logrus.Debugf("Blob will be decompressed") + compressionOperation = types.Decompress + s, err := decompressor(destStream) + if err != nil { + return types.BlobInfo{}, err + } + defer s.Close() + destStream = s + inputInfo.Digest = "" + inputInfo.Size = -1 + } else { + // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. + logrus.Debugf("Using original blob without modification") + compressionOperation = types.PreserveOriginal + inputInfo = srcInfo + } + + // === Report progress using the c.progress channel, if required. + if c.progress != nil && c.progressInterval > 0 { + destStream = &progressReader{ + source: destStream, + channel: c.progress, + interval: c.progressInterval, + artifact: srcInfo, + lastTime: time.Now(), + } + } + + // === Finally, send the layer stream to dest. 
+	uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
+	if err != nil {
+		return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
+	}
+
+	uploadedInfo.Annotations = srcInfo.Annotations
+
+	uploadedInfo.CompressionOperation = compressionOperation
+	// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
+	if canModifyBlob && !isConfig {
+		uploadedInfo.CompressionAlgorithm = &desiredCompressionFormat
+	}
+
+	// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
+	// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
+	// So, read everything from originalLayerReader, which will cause the rest to be
+	// sent there if we are not already at EOF.
+	if getOriginalLayerCopyWriter != nil {
+		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
+		_, err := io.Copy(ioutil.Discard, originalLayerReader)
+		if err != nil {
+			return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
+		}
+	}
+
+	if digestingReader.validationFailed { // Coverage: This should never happen.
+		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+	}
+	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
+		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
+	}
+	if digestingReader.validationSucceeded {
+		// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
+		// srcInfo.Digest describes the pre-compressionOperation input, verified by digestingReader
+		// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
+		// (because inputInfo.Digest == "", this must have been computed afresh).
+		switch compressionOperation {
+		case types.PreserveOriginal:
+			break // Do nothing, we have only one digest and we might not have even verified it.
+		case types.Compress:
+			c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+		case types.Decompress:
+			c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+		default:
+			return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
+		}
+	}
+	return uploadedInfo, nil
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) {
+	err := errors.New("Internal error: unexpected panic in compressGoroutine")
+	defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+		dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+	}()
+
+	compressor, err := compression.CompressStream(dest, compressionFormat, c.compressionLevel)
+	if err != nil {
+		return
+	}
+	defer compressor.Close()
+
+	buf := make([]byte, compressionBufferSize)
+
+	_, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e.
causes dest.Close() +} diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go new file mode 100644 index 000000000..f5f6c9c5f --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -0,0 +1,154 @@ +package copy + +import ( + "context" + "strings" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. +// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location. +// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. +var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} + +// orderedSet is a list of strings (MIME types in our case), with each string appearing at most once. +type orderedSet struct { + list []string + included map[string]struct{} +} + +// newOrderedSet creates a correctly initialized orderedSet. +// [Sometimes it would be really nice if Golang had constructors…] +func newOrderedSet() *orderedSet { + return &orderedSet{ + list: []string{}, + included: map[string]struct{}{}, + } +} + +// append adds s to the end of os, only if it is not included already. +func (os *orderedSet) append(s string) { + if _, ok := os.included[s]; !ok { + os.list = append(os.list, s) + os.included[s] = struct{}{} + } +} + +// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest. +// Note that the conversion will only happen later, through ic.src.UpdatedImage +// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), +// and a list of other possible alternatives, in order. +func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) { + _, srcType, err := ic.src.Manifest(ctx) + if err != nil { // This should have been cached?! + return "", nil, errors.Wrap(err, "Error reading manifest") + } + normalizedSrcType := manifest.NormalizedMIMEType(srcType) + if srcType != normalizedSrcType { + logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType) + srcType = normalizedSrcType + } + + if forceManifestMIMEType != "" { + destSupportedManifestMIMETypes = []string{forceManifestMIMEType} + } + + if len(destSupportedManifestMIMETypes) == 0 { + return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. + } + supportedByDest := map[string]struct{}{} + for _, t := range destSupportedManifestMIMETypes { + supportedByDest[t] = struct{}{} + } + + // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. + // So, build a list of types to try in order of decreasing preference. + // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct, + // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other. 
+ // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types + // and never attempt the other one. + prioritizedTypes := newOrderedSet() + + // First of all, prefer to keep the original manifest unmodified. + if _, ok := supportedByDest[srcType]; ok { + prioritizedTypes.append(srcType) + } + if !ic.canModifyManifest { + // We could also drop the !ic.canModifyManifest check and have the caller + // make the choice; it is already doing that to an extent, to improve error + // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” + // special case in here; the caller can then worry (or not) only about a good UI. + logrus.Debugf("We can't modify the manifest, hoping for the best...") + return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? + } + + // Then use our list of preferred types. + for _, t := range preferredManifestMIMETypes { + if _, ok := supportedByDest[t]; ok { + prioritizedTypes.append(t) + } + } + + // Finally, try anything else the destination supports. + for _, t := range destSupportedManifestMIMETypes { + prioritizedTypes.append(t) + } + + logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) + if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. + return "", nil, errors.New("Internal error: no candidate MIME types") + } + preferredType := prioritizedTypes.list[0] + if preferredType != srcType { + ic.manifestUpdates.ManifestMIMEType = preferredType + } else { + logrus.Debugf("... will first try using the original manifest unmodified") + } + return preferredType, prioritizedTypes.list[1:], nil +} + +// isMultiImage returns true if img is a list of images +func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) { + _, mt, err := img.Manifest(ctx) + if err != nil { + return false, err + } + return manifest.MIMETypeIsMultiImage(mt), nil +} + +// determineListConversion takes the current MIME type of a list of manifests, +// the list of MIME types supported for a given destination, and a possible +// forced value, and returns the MIME type to which we should convert the list +// of manifests, whether we are converting to it or using it unmodified. +func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, error) { + // If we're forcing it, we prefer the forced value over everything else. + if forcedListMIMEType != "" { + return forcedListMIMEType, nil + } + // If there's no list of supported types, then anything we support is expected to be supported. + if len(destSupportedMIMETypes) == 0 { + destSupportedMIMETypes = manifest.SupportedListMIMETypes + } + var selectedType string + for i := range destSupportedMIMETypes { + // The second priority is the first member of the list of acceptable types that is a list, + // but keep going in case current type occurs later in the list. + if selectedType == "" && manifest.MIMETypeIsMultiImage(destSupportedMIMETypes[i]) { + selectedType = destSupportedMIMETypes[i] + } + // The first priority is the current type, if it's in the list, since that lets us avoid a + // conversion that isn't strictly necessary. 
+ if destSupportedMIMETypes[i] == currentListMIMEType { + selectedType = destSupportedMIMETypes[i] + } + } + if selectedType == "" { + return "", errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes) + } + // Done. + return selectedType, nil +} diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_reader.go new file mode 100644 index 000000000..1d0c41bce --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/progress_reader.go @@ -0,0 +1,28 @@ +package copy + +import ( + "io" + "time" + + "github.com/containers/image/v5/types" +) + +// progressReader is a reader that reports its progress on an interval. +type progressReader struct { + source io.Reader + channel chan types.ProgressProperties + interval time.Duration + artifact types.BlobInfo + lastTime time.Time + offset uint64 +} + +func (r *progressReader) Read(p []byte) (int, error) { + n, err := r.source.Read(p) + r.offset += uint64(n) + if time.Since(r.lastTime) > r.interval { + r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset} + r.lastTime = time.Now() + } + return n, err +} diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go new file mode 100644 index 000000000..8f46e9de6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -0,0 +1,31 @@ +package copy + +import ( + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/transports" + "github.com/pkg/errors" +) + +// createSignature creates a new signature of manifest using keyIdentity. +func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { + mech, err := signature.NewGPGSigningMechanism() + if err != nil { + return nil, errors.Wrap(err, "Error initializing GPG") + } + defer mech.Close() + if err := mech.SupportsSigning(); err != nil { + return nil, errors.Wrap(err, "Signing not supported") + } + + dockerReference := c.dest.Reference().DockerReference() + if dockerReference == nil { + return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } + + c.Printf("Signing manifest\n") + newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) + if err != nil { + return nil, errors.Wrap(err, "Error creating signature") + } + return newSig, nil +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go new file mode 100644 index 000000000..2d6650de7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -0,0 +1,267 @@ +package directory + +import ( + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const version = "Directory Transport Version: 1.1\n" + +// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created +// using the 'dir' transport +var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") + +type dirImageDestination struct { + ref dirReference + compress bool +} + +// newImageDestination returns an ImageDestination for writing to a directory. 
+func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) {
+	d := &dirImageDestination{ref: ref, compress: compress}
+
+	// If the directory exists, check whether it is empty.
+	// If it is not empty, check whether its contents match those of a container image directory; if so, overwrite them.
+	// If the contents don't match, return an error.
+	dirExists, err := pathExists(d.ref.resolvedPath)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath)
+	}
+	if dirExists {
+		isEmpty, err := isDirEmpty(d.ref.resolvedPath)
+		if err != nil {
+			return nil, err
+		}
+
+		if !isEmpty {
+			versionExists, err := pathExists(d.ref.versionPath())
+			if err != nil {
+				return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath())
+			}
+			if versionExists {
+				contents, err := ioutil.ReadFile(d.ref.versionPath())
+				if err != nil {
+					return nil, err
+				}
+				// check that the contents of the version file are what we expect
+				if string(contents) != version {
+					return nil, ErrNotContainerImageDir
+				}
+			} else {
+				return nil, ErrNotContainerImageDir
+			}
+			// delete directory contents so that only one image is in the directory at a time
+			if err = removeDirContents(d.ref.resolvedPath); err != nil {
+				return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath)
+			}
+			logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
+		}
+	} else {
+		// create directory if it doesn't exist
+		if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil {
+			return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath)
+		}
+	}
+	// create version file
+	err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath())
+	}
+	return d, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *dirImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *dirImageDestination) Close() error {
+	return nil
+}
+
+func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
+	return nil
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
+	return nil
+}
+
+func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
+	if d.compress {
+		return types.Compress
+	}
+	return types.PreserveOriginal
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should actually be
+// uploaded to the image destination, true otherwise.
+func (d *dirImageDestination) AcceptsForeignLayerURLs() bool {
+	return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *dirImageDestination) MustMatchRuntimeOS() bool {
+	return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return false // N/A, DockerReference() returns nil.
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *dirImageDestination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded := false
+	defer func() {
+		blobFile.Close()
+		if !succeeded {
+			os.Remove(blobFile.Name())
+		}
+	}()
+
+	digester := digest.Canonical.Digester()
+	tee := io.TeeReader(stream, digester.Hash())
+
+	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+	size, err := io.Copy(blobFile, tee)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	computedDigest := digester.Digest()
+	if inputInfo.Size != -1 && size != inputInfo.Size {
+		return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+	}
+	if err := blobFile.Sync(); err != nil {
+		return types.BlobInfo{}, err
+	}
+	if err := blobFile.Chmod(0644); err != nil {
+		return types.BlobInfo{}, err
+	}
+	blobPath := d.ref.layerPath(computedDigest)
+	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded = true
+	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+	}
+	blobPath := d.ref.layerPath(info.Digest)
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, types.BlobInfo{}, nil
+	}
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
+	return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
+}
+
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+	for i, sig := range signatures {
+		if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e.
rollback is allowed but not guaranteed) +func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error { + return nil +} + +// returns true if path exists +func pathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if err != nil && os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// returns true if directory is empty +func isDirEmpty(path string) (bool, error) { + files, err := ioutil.ReadDir(path) + if err != nil { + return false, err + } + return len(files) == 0, nil +} + +// deletes the contents of a directory +func removeDirContents(path string) error { + files, err := ioutil.ReadDir(path) + if err != nil { + return err + } + + for _, file := range files { + if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go new file mode 100644 index 000000000..ad9129d40 --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/directory_src.go @@ -0,0 +1,96 @@ +package directory + +import ( + "context" + "io" + "io/ioutil" + "os" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +type dirImageSource struct { + ref dirReference +} + +// newImageSource returns an ImageSource reading from an existing directory. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(ref dirReference) types.ImageSource { + return &dirImageSource{ref} +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *dirImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *dirImageSource) Close() error { + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest)) + if err != nil { + return nil, "", err + } + return m, manifest.GuessMIMEType(m), err +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *dirImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
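+//
+// A caller might stream a blob roughly like this sketch (illustrative only; src,
+// a manifest-derived info, and an io.Writer dst are assumed to be in scope):
+//
+//	rc, size, err := src.GetBlob(ctx, info, cache)
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	if _, err := io.Copy(dst, rc); err != nil {
+//		return err
+//	}
+//	_ = size // may be -1 for other transports; here it is the file size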
+func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ r, err := os.Open(s.ref.layerPath(info.Digest))
+ if err != nil {
+ return nil, -1, err
+ }
+ fi, err := r.Stat()
+ if err != nil {
+ return nil, -1, err
+ }
+ return r, fi.Size(), nil
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ signatures := [][]byte{}
+ for i := 0; ; i++ {
+ signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest))
+ if err != nil {
+ if os.IsNotExist(err) {
+ break
+ }
+ return nil, err
+ }
+ signatures = append(signatures, signature)
+ }
+ return signatures, nil
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *dirImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go
new file mode 100644
index 000000000..adfec6ef3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go
@@ -0,0 +1,193 @@
+package directory
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for directory paths.
+var Transport = dirTransport{}
+
+type dirTransport struct{}
+
+func (t dirTransport) Name() string {
+ return "dir"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return NewReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
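+//
+// For example (an illustrative policy.json fragment, not upstream test data), a dir:
+// scope must be an absolute, cleaned path, as enforced below:
+//
+//	"dir": {
+//	    "/var/lib/images": [{"type": "insecureAcceptAnything"}]
+//	}
+//
+// while values such as "relative/path", "/" or "/tmp/.." would be rejected.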
+func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { + if !strings.HasPrefix(scope, "/") { + return errors.Errorf("Invalid scope %s: Must be an absolute path", scope) + } + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } + return nil +} + +// dirReference is an ImageReference for directory paths. +type dirReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + path string // As specified by the user. May be relative, contain symlinks, etc. + resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. +} + +// There is no directory.ParseReference because it is rather pointless. +// Callers who need a transport-independent interface will go through +// dirTransport.ParseReference; callers who intentionally deal with directories +// can use directory.NewReference. + +// NewReference returns a directory reference for a specified path. +// +// We do not expose an API supplying the resolvedPath; we could, but recomputing it +// is generally cheap enough that we prefer being confident about the properties of resolvedPath. +func NewReference(path string) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path) + if err != nil { + return nil, err + } + return dirReference{path: path, resolvedPath: resolved}, nil +} + +func (ref dirReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref dirReference) StringWithinTransport() string { + return ref.path +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref dirReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. 
after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref dirReference) PolicyConfigurationIdentity() string { + return ref.resolvedPath +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref dirReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedPath + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by dirTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src := newImageSource(ref) + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ref), nil +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + compress := false + if sys != nil { + compress = sys.DirForceCompress + } + return newImageDestination(ref, compress) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.Errorf("Deleting images not implemented for dir: images") +} + +// manifestPath returns a path for the manifest within a directory using our conventions. 
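+//
+// Taken together, these path helpers produce a directory layout like the following
+// (illustrative; <encoded digest> stands for the hex part of a blob digest):
+//
+//	manifest.json
+//	<encoded digest>   (one file per config/layer blob)
+//	signature-1
+//	version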
+func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string {
+ if instanceDigest != nil {
+ return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json")
+ }
+ return filepath.Join(ref.path, "manifest.json")
+}
+
+// layerPath returns a path for a layer tarball within a directory using our conventions.
+func (ref dirReference) layerPath(digest digest.Digest) string {
+ // FIXME: Should we keep the digest identification?
+ return filepath.Join(ref.path, digest.Encoded())
+}
+
+// signaturePath returns a path for a signature within a directory using our conventions.
+func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string {
+ if instanceDigest != nil {
+ return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1))
+ }
+ return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
+}
+
+// versionPath returns a path for the version file within a directory using our conventions.
+func (ref dirReference) versionPath() string {
+ return filepath.Join(ref.path, "version")
+}
diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
new file mode 100644
index 000000000..71136b880
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
@@ -0,0 +1,56 @@
+package explicitfilepath
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/pkg/errors"
+)
+
+// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
+// To do so, all elements of the input path must exist; as a special case, the final component may be
+// a non-existent name (but not a symlink pointing to a non-existent name).
+// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
+func ResolvePathToFullyExplicit(path string) (string, error) {
+ switch _, err := os.Lstat(path); {
+ case err == nil:
+ return resolveExistingPathToFullyExplicit(path)
+ case os.IsNotExist(err):
+ parent, file := filepath.Split(path)
+ resolvedParent, err := resolveExistingPathToFullyExplicit(parent)
+ if err != nil {
+ return "", err
+ }
+ if file == "." || file == ".." {
+ // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well.
+ // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
+ // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
+ // in the resulting path, and especially not at the end.
+ return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
+ }
+ resolvedPath := filepath.Join(resolvedParent, file)
+ // As a sanity check, ensure that there are no "." or ".." components.
+ cleanedResolvedPath := filepath.Clean(resolvedPath)
+ if cleanedResolvedPath != resolvedPath {
+ // Coverage: This should never happen.
+ return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
+ }
+ return resolvedPath, nil
+ default: // err != nil, unrecognized
+ return "", err
+ }
+}
+
+// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit,
+// but without the special case for missing final component.
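+//
+// For example (a hypothetical filesystem state), if /data is a symlink to /mnt/data,
+// then resolveExistingPathToFullyExplicit("/data") == "/mnt/data", and
+// ResolvePathToFullyExplicit("/data/img") == "/mnt/data/img" even if img does not
+// exist yet, while ResolvePathToFullyExplicit("/data/a/b") fails if /data/a is missing.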
+func resolveExistingPathToFullyExplicit(path string) (string, error) { + resolved, err := filepath.Abs(path) + if err != nil { + return "", err // Coverage: This can fail only if os.Getwd() fails. + } + resolved, err = filepath.EvalSymlinks(resolved) + if err != nil { + return "", err + } + return filepath.Clean(resolved), nil +} diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go new file mode 100644 index 000000000..5845f63be --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go @@ -0,0 +1,72 @@ +package archive + +import ( + "context" + "io" + "os" + + "github.com/containers/image/v5/docker/tarfile" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +type archiveImageDestination struct { + *tarfile.Destination // Implements most of types.ImageDestination + ref archiveReference + writer io.Closer +} + +func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) { + // ref.path can be either a pipe or a regular file + // in the case of a pipe, we require that we can open it for write + // in the case of a regular file, we don't want to overwrite any pre-existing file + // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy, + // only in a different way. Either way, it’s up to the user to not have two writers to the same path.) + fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, errors.Wrapf(err, "error opening file %q", ref.path) + } + + fhStat, err := fh.Stat() + if err != nil { + return nil, errors.Wrapf(err, "error statting file %q", ref.path) + } + + if fhStat.Mode().IsRegular() && fhStat.Size() != 0 { + return nil, errors.New("docker-archive doesn't support modifying existing images") + } + + tarDest := tarfile.NewDestination(fh, ref.destinationRef) + if sys != nil && sys.DockerArchiveAdditionalTags != nil { + tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags) + } + return &archiveImageDestination{ + Destination: tarDest, + ref: ref, + writer: fh, + }, nil +} + +// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved +func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Decompress +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *archiveImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *archiveImageDestination) Close() error { + return d.writer.Close() +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed)
+func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ return d.Destination.Commit(ctx)
+}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go
new file mode 100644
index 000000000..a90707437
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/archive/src.go
@@ -0,0 +1,35 @@
+package archive
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/docker/tarfile"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+)
+
+type archiveImageSource struct {
+ *tarfile.Source // Implements most of types.ImageSource
+ ref archiveReference
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSource, error) {
+ if ref.destinationRef != nil {
+ logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
+ }
+ src, err := tarfile.NewSourceFromFile(ref.path)
+ if err != nil {
+ return nil, err
+ }
+ return &archiveImageSource{
+ Source: src,
+ ref: ref,
+ }, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *archiveImageSource) Reference() types.ImageReference {
+ return s.ref
+}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go
new file mode 100644
index 000000000..44213bb8d
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go
@@ -0,0 +1,160 @@
+package archive
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ ctrImage "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for local Docker archives.
+var Transport = archiveTransport{}
+
+type archiveTransport struct{}
+
+func (t archiveTransport) Name() string {
+ return "docker-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // See the explanation in archiveReference.PolicyConfigurationIdentity.
+ return errors.New(`docker-archive: does not support any scopes except the default "" one`)
+}
+
+// archiveReference is an ImageReference for Docker images.
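+//
+// As a user would write them (paths are illustrative), such references look like:
+//
+//	docker-archive:/tmp/busybox.tar                  (read an existing archive)
+//	docker-archive:/tmp/busybox.tar:busybox:latest   (write, tagging the image in the archive)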
+type archiveReference struct {
+ // destinationRef is only used for destinations; it is optional and can be nil for destinations as well.
+ destinationRef reference.NamedTagged
+ path string
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+ if refString == "" {
+ return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
+ }
+
+ parts := strings.SplitN(refString, ":", 2)
+ path := parts[0]
+ var destinationRef reference.NamedTagged
+
+ // A :tag was specified, which is only necessary for destinations.
+ if len(parts) == 2 {
+ ref, err := reference.ParseNormalizedNamed(parts[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "docker-archive parsing reference")
+ }
+ ref = reference.TagNameOnly(ref)
+
+ if _, isDigest := ref.(reference.Canonical); isDigest {
+ return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString)
+ }
+
+ refTagged, isTagged := ref.(reference.NamedTagged)
+ if !isTagged {
+ // Really shouldn't be hit...
+ return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString)
+ }
+ destinationRef = refTagged
+ }
+
+ return archiveReference{
+ destinationRef: destinationRef,
+ path: path,
+ }, nil
+}
+
+func (ref archiveReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref archiveReference) StringWithinTransport() string {
+ if ref.destinationRef == nil {
+ return ref.path
+ }
+ return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String())
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref archiveReference) DockerReference() reference.Named {
+ return ref.destinationRef
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref archiveReference) PolicyConfigurationIdentity() string {
+ // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity.
+ return "" +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref archiveReference) PolicyConfigurationNamespaces() []string { + // TODO + return []string{} +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, ref) + if err != nil { + return nil, err + } + return ctrImage.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref archiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref archiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + // Not really supported, for safety reasons. + return errors.New("Deleting images not implemented for docker-archive: images") +} diff --git a/vendor/github.com/containers/image/v5/docker/cache.go b/vendor/github.com/containers/image/v5/docker/cache.go new file mode 100644 index 000000000..728d32d17 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/cache.go @@ -0,0 +1,23 @@ +package docker + +import ( + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" +) + +// bicTransportScope returns a BICTransportScope appropriate for ref. +func bicTransportScope(ref dockerReference) types.BICTransportScope { + // Blobs can be reused across the whole registry. + return types.BICTransportScope{Opaque: reference.Domain(ref.ref)} +} + +// newBICLocationReference returns a BICLocationReference appropriate for ref. +func newBICLocationReference(ref dockerReference) types.BICLocationReference { + // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob). + return types.BICLocationReference{Opaque: ref.ref.Name()} +} + +// parseBICLocationReference returns a repository for encoded lr. 
+func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
+ return reference.ParseNormalizedNamed(lr.Opaque)
+}
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go
new file mode 100644
index 000000000..323a02fc0
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go
@@ -0,0 +1,85 @@
+package daemon
+
+import (
+ "net/http"
+ "path/filepath"
+
+ "github.com/containers/image/v5/types"
+ dockerclient "github.com/docker/docker/client"
+ "github.com/docker/go-connections/tlsconfig"
+)
+
+const (
+ // The default API version to be used in case none is explicitly specified
+ defaultAPIVersion = "1.22"
+)
+
+// newDockerClient initializes a new API client based on the passed SystemContext.
+func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
+ host := dockerclient.DefaultDockerHost
+ if sys != nil && sys.DockerDaemonHost != "" {
+ host = sys.DockerDaemonHost
+ }
+
+ // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient.
+ // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s
+ // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket
+ // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport.
+ //
+ // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client.
+ //
+ // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set
+ // TLSClientConfig to nil. This can be achieved by using the form `http://`
+ url, err := dockerclient.ParseHostURL(host)
+ if err != nil {
+ return nil, err
+ }
+ var httpClient *http.Client
+ if url.Scheme != "unix" {
+ if url.Scheme == "http" {
+ httpClient = httpConfig()
+ } else {
+ hc, err := tlsConfig(sys)
+ if err != nil {
+ return nil, err
+ }
+ httpClient = hc
+ }
+ }
+
+ return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil)
+}
+
+func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
+ options := tlsconfig.Options{}
+ if sys != nil && sys.DockerDaemonInsecureSkipTLSVerify {
+ options.InsecureSkipVerify = true
+ }
+
+ if sys != nil && sys.DockerDaemonCertPath != "" {
+ options.CAFile = filepath.Join(sys.DockerDaemonCertPath, "ca.pem")
+ options.CertFile = filepath.Join(sys.DockerDaemonCertPath, "cert.pem")
+ options.KeyFile = filepath.Join(sys.DockerDaemonCertPath, "key.pem")
+ }
+
+ tlsc, err := tlsconfig.Client(options)
+ if err != nil {
+ return nil, err
+ }
+
+ return &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsc,
+ },
+ CheckRedirect: dockerclient.CheckRedirect,
+ }, nil
+}
+
+func httpConfig() *http.Client {
+ return &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: nil,
+ },
+ CheckRedirect: dockerclient.CheckRedirect,
+ }
+}
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
new file mode 100644
index 000000000..25ce55a17
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
@@ -0,0 +1,144 @@
+package daemon
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/docker/tarfile"
+ "github.com/containers/image/v5/types"
+ "github.com/docker/docker/client"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+type
daemonImageDestination struct { + ref daemonReference + mustMatchRuntimeOS bool + *tarfile.Destination // Implements most of types.ImageDestination + // For talking to imageLoadGoroutine + goroutineCancel context.CancelFunc + statusChannel <-chan error + writer *io.PipeWriter + // Other state + committed bool // writer has been closed +} + +// newImageDestination returns a types.ImageDestination for the specified image reference. +func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { + if ref.ref == nil { + return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + namedTaggedRef, ok := ref.ref.(reference.NamedTagged) + if !ok { + return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + + var mustMatchRuntimeOS = true + if sys != nil && sys.DockerDaemonHost != client.DefaultDockerHost { + mustMatchRuntimeOS = false + } + + c, err := newDockerClient(sys) + if err != nil { + return nil, errors.Wrap(err, "Error initializing docker engine client") + } + + reader, writer := io.Pipe() + // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. + statusChannel := make(chan error, 1) + + goroutineContext, goroutineCancel := context.WithCancel(ctx) + go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) + + return &daemonImageDestination{ + ref: ref, + mustMatchRuntimeOS: mustMatchRuntimeOS, + Destination: tarfile.NewDestination(writer, namedTaggedRef), + goroutineCancel: goroutineCancel, + statusChannel: statusChannel, + writer: writer, + committed: false, + }, nil +} + +// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel +func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) { + err := errors.New("Internal error: unexpected panic in imageLoadGoroutine") + defer func() { + logrus.Debugf("docker-daemon: sending done, status %v", err) + statusChannel <- err + }() + defer func() { + if err == nil { + reader.Close() + } else { + reader.CloseWithError(err) + } + }() + + resp, err := c.ImageLoad(ctx, reader, true) + if err != nil { + err = errors.Wrap(err, "Error saving image to docker engine") + return + } + defer resp.Body.Close() +} + +// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved +func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.PreserveOriginal +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *daemonImageDestination) MustMatchRuntimeOS() bool { + return d.mustMatchRuntimeOS +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *daemonImageDestination) Close() error { + if !d.committed { + logrus.Debugf("docker-daemon: Closing tar stream to abort loading") + // In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing. 
+ // In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including
+ // https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the
+ // net/http version with native Context support in Go 1.7) do not always actually immediately cancel
+ // the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and
+ // return early if the context is canceled without terminating the goroutine at all.
+ // So we need this CloseWithError to terminate sending the HTTP request Body
+ // immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending
+ // the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
+ // Whether that works or not, closing the PipeWriter seems desirable in any case.
+ d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()"))
+ }
+ d.goroutineCancel()
+
+ return nil
+}
+
+func (d *daemonImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ logrus.Debugf("docker-daemon: Closing tar stream")
+ if err := d.Destination.Commit(ctx); err != nil {
+ return err
+ }
+ if err := d.writer.Close(); err != nil {
+ return err
+ }
+ d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.
+
+ logrus.Debugf("docker-daemon: Waiting for status")
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case err := <-d.statusChannel:
+ return err
+ }
+}
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
new file mode 100644
index 000000000..46fbcc4e0
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
@@ -0,0 +1,57 @@
+package daemon
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/docker/tarfile"
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+)
+
+type daemonImageSource struct {
+ ref daemonReference
+ *tarfile.Source // Implements most of types.ImageSource
+}
+
+type layerInfo struct {
+ path string
+ size int64
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+//
+// It would be great if we were able to stream the input tar as it is being
+// sent; but Docker sends the top-level manifest, which determines which paths
+// to look for, at the end, so we will need to seek back and re-read, several times.
+// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
+// is the config, and that the following len(RootFS) files are the layers, but that feels
+// way too brittle.)
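+//
+// For example (reference illustrative), a source for docker-daemon:busybox:latest
+// results in a single c.ImageSave(ctx, []string{"busybox:latest"}) call below, whose
+// tar output is buffered by tarfile.NewSourceFromStream so that it can be re-read.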
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
+ c, err := newDockerClient(sys)
+ if err != nil {
+ return nil, errors.Wrap(err, "Error initializing docker engine client")
+ }
+ // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
+ // Either way ImageSave should create a tarball with exactly one image.
+ inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
+ if err != nil {
+ return nil, errors.Wrap(err, "Error loading image from docker engine")
+ }
+ defer inputStream.Close()
+
+ src, err := tarfile.NewSourceFromStream(inputStream)
+ if err != nil {
+ return nil, err
+ }
+ return &daemonImageSource{
+ ref: ref,
+ Source: src,
+ }, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *daemonImageSource) Reference() types.ImageReference {
+ return s.ref
+}
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
new file mode 100644
index 000000000..4e4ed6881
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
@@ -0,0 +1,223 @@
+package daemon
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/policyconfiguration"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for images managed by a local Docker daemon.
+var Transport = daemonTransport{}
+
+type daemonTransport struct{}
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (t daemonTransport) Name() string {
+ return "docker-daemon"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // ID values cannot be effectively namespaced, and are clearly invalid host:port values.
+ if _, err := digest.Parse(scope); err == nil {
+ return errors.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope)
+ }
+
+ // FIXME? We could be verifying the various character set and length restrictions
+ // from docker/distribution/reference.regexp.go, but other than that there
+ // are few semantically invalid strings.
+ return nil
+}
+
+// daemonReference is an ImageReference for images managed by a local Docker daemon.
+// Exactly one of id and ref can be set.
+// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon) +// For daemonImageDestination, it must be a ref, which is NamedTagged. +// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. +// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) +type daemonReference struct { + id digest.Digest + ref reference.Named // !reference.IsNameOnly +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func ParseReference(refString string) (types.ImageReference, error) { + // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases. + // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars). + + // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). + // reference.ParseAnyReference interprets such strings as digests. + if dgst, err := digest.Parse(refString); err == nil { + // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. + // Other digest references are ambiguous, so refuse them. + if dgst.Algorithm() != digest.Canonical { + return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) + } + return NewReference(dgst, nil) + } + + ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values + if err != nil { + return nil, err + } + if reference.FamiliarName(ref) == digest.Canonical.String() { + return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) + } + return NewReference("", ref) +} + +// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) +func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { + if id != "" && ref != nil { + return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") + } + if ref != nil { + if reference.IsNameOnly(ref) { + return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) + } + // A github.com/distribution/reference value can have a tag and a digest at the same time! + // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input. + // This MAY be accepted in the future. + // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop + // the tag or the digest first?) 
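+ // For example (digest elided), "busybox:latest@sha256:…" carries both a tag
+ // and a digest, and is rejected just below.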
+ _, isTagged := ref.(reference.NamedTagged)
+ _, isDigested := ref.(reference.Canonical)
+ if isTagged && isDigested {
+ return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
+ }
+ }
+ return daemonReference{
+ id: id,
+ ref: ref,
+ }, nil
+}
+
+func (ref daemonReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+// instead, see transports.ImageName().
+func (ref daemonReference) StringWithinTransport() string {
+ switch {
+ case ref.id != "":
+ return ref.id.String()
+ case ref.ref != nil:
+ return reference.FamiliarString(ref.ref)
+ default: // Coverage: Should never happen, NewReference above should refuse such values.
+ panic("Internal inconsistency: daemonReference has empty id and nil ref")
+ }
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref daemonReference) DockerReference() reference.Named {
+ return ref.ref // May be nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref daemonReference) PolicyConfigurationIdentity() string {
+ // We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible.
+ // But the existence of image IDs means that we can’t fully namespace the input:
+ // a single image can be namespaced either using the name or the ID depending on how it is named.
+ //
+ // That’s fairly unexpected, but we have to cope somehow.
+ //
+ // So, use the ordinary docker/policyconfiguration namespacing for named images;
+ // image IDs all fall into the root namespace.
+ // Users can set up the root namespace to be either untrusted or rejected,
+ // and to set up specific trust for named namespaces. This allows verifying image
+ // identity when a name is known, and unnamed images would be untrusted or rejected.
+ switch {
+ case ref.id != "":
+ return "" // This still allows using the default "" scope to define a global policy for ID-identified images.
+ case ref.ref != nil:
+ res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
+ if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
+ panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref daemonReference) PolicyConfigurationNamespaces() []string { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + switch { + case ref.id != "": + return []string{} + case ref.ref != nil: + return policyconfiguration.DockerReferenceNamespaces(ref.ref) + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref daemonReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref daemonReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(ctx, sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + // Should this just untag the image? Should this stop running containers? + // The semantics is not quite as clear as for remote repositories. + // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. 
+ return errors.Errorf("Deleting images not implemented for docker-daemon: images") +} diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go new file mode 100644 index 000000000..0b012c703 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -0,0 +1,703 @@ +package docker + +import ( + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/docker/config" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/pkg/tlsclientconfig" + "github.com/containers/image/v5/types" + clientLib "github.com/docker/distribution/registry/client" + "github.com/docker/go-connections/tlsconfig" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + dockerHostname = "docker.io" + dockerV1Hostname = "index.docker.io" + dockerRegistry = "registry-1.docker.io" + + resolvedPingV2URL = "%s://%s/v2/" + resolvedPingV1URL = "%s://%s/v1/_ping" + tagsPath = "/v2/%s/tags/list" + manifestPath = "/v2/%s/manifests/%s" + blobsPath = "/v2/%s/blobs/%s" + blobUploadPath = "/v2/%s/blobs/uploads/" + extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" + + minimumTokenLifetimeSeconds = 60 + + extensionSignatureSchemaVersion = 2 // extensionSignature.Version + extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type +) + +var systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} + +// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: +// signature represents a Docker image signature. +type extensionSignature struct { + Version int `json:"schemaVersion"` // Version specifies the schema version + Name string `json:"name"` // Name must be in "sha256:@signatureName" format + Type string `json:"type"` // Type is optional, of not set it will be defaulted to "AtomicImageV1" + Content []byte `json:"content"` // Content contains the signature +} + +// signatureList represents list of Docker image signatures. +type extensionSignatureList struct { + Signatures []extensionSignature `json:"signatures"` +} + +type bearerToken struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + expirationTime time.Time +} + +// dockerClient is configuration for dealing with a single Docker registry. +type dockerClient struct { + // The following members are set by newDockerClient and do not change afterwards. + sys *types.SystemContext + registry string + + // tlsClientConfig is setup by newDockerClient and will be used and updated + // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime. + tlsClientConfig *tls.Config + // The following members are not set by newDockerClient and must be set by callers if needed. + username string + password string + signatureBase signatureStorageBase + scope authScope + + // The following members are detected registry properties: + // They are set after a successful detectProperties(), and never change afterwards. 
+	client             *http.Client
+	scheme             string
+	challenges         []challenge
+	supportsSignatures bool
+
+	// Private state for setupRequestAuth (key: string, value: bearerToken)
+	tokenCache sync.Map
+	// Private state for detectProperties:
+	detectPropertiesOnce  sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
+	detectPropertiesError error     // detectPropertiesError caches the initial error.
+}
+
+type authScope struct {
+	remoteName string
+	actions    string
+}
+
+// sendAuth determines whether we need authentication for the v2 or v1 endpoint.
+type sendAuth int
+
+const (
+	// v2 endpoint with authentication.
+	v2Auth sendAuth = iota
+	// v1 endpoint with authentication.
+	// TODO: Get v1Auth working
+	// v1Auth
+	// no authentication, works for both v1 and v2.
+	noAuth
+)
+
+func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
+	token := new(bearerToken)
+	if err := json.Unmarshal(blob, &token); err != nil {
+		return nil, err
+	}
+	if token.Token == "" {
+		token.Token = token.AccessToken
+	}
+	if token.ExpiresIn < minimumTokenLifetimeSeconds {
+		token.ExpiresIn = minimumTokenLifetimeSeconds
+		logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
+	}
+	if token.IssuedAt.IsZero() {
+		token.IssuedAt = time.Now().UTC()
+	}
+	token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+	return token, nil
+}
+
+// this is cloned from docker/go-connections because upstream docker has changed
+// it and make deps here fails otherwise.
+// We'll drop this once we upgrade to docker 1.13.x deps.
+func serverDefault() *tls.Config {
+	return &tls.Config{
+		// Avoid fallback to SSL protocols < TLS1.0
+		MinVersion:               tls.VersionTLS10,
+		PreferServerCipherSuites: true,
+		CipherSuites:             tlsconfig.DefaultServerAcceptedCiphers,
+	}
+}
+
+// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on sys and hostPort.
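Before the implementation, the lookup order is easy to see in isolation. A minimal sketch of the candidate paths dockerCertDir considers, assuming the two system-wide bases above; the host:port value is a hypothetical example, and the real function additionally honors SystemContext overrides and stats each candidate:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// The two system-wide bases consulted, in order (systemPerHostCertDirPaths above).
	bases := []string{"/etc/containers/certs.d", "/etc/docker/certs.d"}
	hostPort := "registry.example.com:5000" // hypothetical registry host[:port]
	for _, base := range bases {
		// dockerCertDir stats each candidate and keeps the first one that exists.
		fmt.Println(filepath.Join(base, hostPort))
	}
}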
+func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
+	if sys != nil && sys.DockerCertPath != "" {
+		return sys.DockerCertPath, nil
+	}
+	if sys != nil && sys.DockerPerHostCertDirPath != "" {
+		return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil
+	}
+
+	var (
+		hostCertDir     string
+		fullCertDirPath string
+	)
+	for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths {
+		if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+			hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
+		} else {
+			hostCertDir = systemPerHostCertDirPath
+		}
+
+		fullCertDirPath = filepath.Join(hostCertDir, hostPort)
+		_, err := os.Stat(fullCertDirPath)
+		if err == nil {
+			break
+		}
+		if os.IsNotExist(err) {
+			continue
+		}
+		if os.IsPermission(err) {
+			logrus.Debugf("error accessing certs directory due to permissions: %v", err)
+			continue
+		}
+		if err != nil {
+			return "", err
+		}
+	}
+	return fullCertDirPath, nil
+}
+
+// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
+// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
+func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
+	registry := reference.Domain(ref.ref)
+	username, password, err := config.GetAuthentication(sys, registry)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error getting username and password")
+	}
+	sigBase, err := configuredSignatureStorageBase(sys, ref, write)
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := newDockerClient(sys, registry, ref.ref.Name())
+	if err != nil {
+		return nil, err
+	}
+	client.username = username
+	client.password = password
+	client.signatureBase = sigBase
+	client.scope.actions = actions
+	client.scope.remoteName = reference.Path(ref.ref)
+	return client, nil
+}
+
+// newDockerClient returns a new dockerClient instance for the given registry
+// and reference. The reference is used to query the registry configuration
+// and can be either a registry (e.g., "registry.com[:5000]") or a repository
+// (e.g., "registry.com[:5000][/some/namespace]/repo").
+// Please note that newDockerClient does not set all members of dockerClient
+// (e.g., username and password); those must be set by callers if necessary.
+func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
+	hostName := registry
+	if registry == dockerHostname {
+		registry = dockerRegistry
+	}
+	tlsClientConfig := serverDefault()
+
+	// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
+	// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
+	// dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
+	// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
+	// undocumented and may change if docker/docker changes.
+	certDir, err := dockerCertDir(sys, hostName)
+	if err != nil {
+		return nil, err
+	}
+	if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
+		return nil, err
+	}
+
+	// Check if TLS verification shall be skipped (default=false) which can
+	// be specified in the sysregistriesv2 configuration.
+	skipVerify := false
+	reg, err := sysregistriesv2.FindRegistry(sys, reference)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error loading registries")
+	}
+	if reg != nil {
+		if reg.Blocked {
+			return nil, fmt.Errorf("registry %s is blocked in %s", reg.Prefix, sysregistriesv2.ConfigPath(sys))
+		}
+		skipVerify = reg.Insecure
+	}
+	tlsClientConfig.InsecureSkipVerify = skipVerify
+
+	return &dockerClient{
+		sys:             sys,
+		registry:        registry,
+		tlsClientConfig: tlsClientConfig,
+	}, nil
+}
+
+// CheckAuth validates the credentials by attempting to log into the registry.
+// It returns an error if the HTTP request failed, or if the status code received was 401.
+func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
+	client, err := newDockerClient(sys, registry, registry)
+	if err != nil {
+		return errors.Wrapf(err, "error creating new docker client")
+	}
+	client.username = username
+	client.password = password
+
+	resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return httpResponseToError(resp)
+}
+
+// SearchResult holds the information of each matching image.
+// It matches the output returned by the v1 endpoint.
+type SearchResult struct {
+	Name        string `json:"name"`
+	Description string `json:"description"`
+	// StarCount states the number of stars the image has
+	StarCount int  `json:"star_count"`
+	IsTrusted bool `json:"is_trusted"`
+	// IsAutomated states whether the image is an automated build
+	IsAutomated bool `json:"is_automated"`
+	// IsOfficial states whether the image is an official build
+	IsOfficial bool `json:"is_official"`
+}
+
+// SearchRegistry queries a registry for images that contain "image" in their name.
+// The limit is the maximum number of results desired.
+// Note: the limit value doesn't work with all registries;
+// for example, registry.access.redhat.com returns all the results without limiting them to the limit value.
+func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
+	type V2Results struct {
+		// Repositories holds the results returned by the /v2/_catalog endpoint
+		Repositories []string `json:"repositories"`
+	}
+	type V1Results struct {
+		// Results holds the results returned by the /v1/search endpoint
+		Results []SearchResult `json:"results"`
+	}
+	v2Res := &V2Results{}
+	v1Res := &V1Results{}
+
+	// Get credentials from authfile for the underlying hostname
+	username, password, err := config.GetAuthentication(sys, registry)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error getting username and password")
+	}
+
+	// The /v2/_catalog endpoint has been disabled for docker.io, therefore
+	// any call made to that endpoint will fail. So we use the v1 hostname
+	// for docker.io, both for simplicity of implementation and because it
+	// returns search results.
+	hostname := registry
+	if registry == dockerHostname {
+		hostname = dockerV1Hostname
+	}
+
+	client, err := newDockerClient(sys, hostname, registry)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating new docker client")
+	}
+	client.username = username
+	client.password = password
+
+	// Only try the v1 search endpoint if the search query is not empty. If it is
+	// empty, skip to the v2 endpoint.
+	if image != "" {
+		// set up the query values for the v1 endpoint
+		u := url.URL{
+			Path: "/v1/search",
+		}
+		q := u.Query()
+		q.Set("q", image)
+		q.Set("n", strconv.Itoa(limit))
+		u.RawQuery = q.Encode()
+
+		logrus.Debugf("trying to talk to v1 search endpoint")
+		resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil)
+		if err != nil {
+			logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
+		} else {
+			defer resp.Body.Close()
+			if resp.StatusCode != http.StatusOK {
+				logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, httpResponseToError(resp))
+			} else {
+				if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
+					return nil, err
+				}
+				return v1Res.Results, nil
+			}
+		}
+	}
+
+	logrus.Debugf("trying to talk to v2 search endpoint")
+	resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth, nil)
+	if err != nil {
+		logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
+	} else {
+		defer resp.Body.Close()
+		if resp.StatusCode != http.StatusOK {
+			logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, httpResponseToError(resp))
+		} else {
+			if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
+				return nil, err
+			}
+			searchRes := []SearchResult{}
+			for _, repo := range v2Res.Repositories {
+				if strings.Contains(repo, image) {
+					res := SearchResult{
+						Name: repo,
+					}
+					searchRes = append(searchRes, res)
+				}
+			}
+			return searchRes, nil
+		}
+	}
+
+	return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
+}
+
+// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
+func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+	if err := c.detectProperties(ctx); err != nil {
+		return nil, err
+	}
+
+	url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
+	return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
+}
+
+// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// streamLen, if not -1, specifies the length of the data expected on stream.
+// makeRequest should generally be preferred.
+// In case of an http 429 status code in the response, it performs an exponential back off starting at 2 seconds for at most 5 iterations.
+// If the `Retry-After` header is set in the response, the specified value or date is used instead (capped at the maximum delay).
+// If the stream is non-nil, no back off will be performed.
+// TODO(runcom): too many arguments here, use a struct
+func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+	var (
+		res   *http.Response
+		err   error
+		delay int64
+	)
+	delay = 2
+	const numIterations = 5
+	const maxDelay = 60
+
+	// math.Min() only supports float64, so have an anonymous func to avoid
+	// casting.
+	min := func(a int64, b int64) int64 {
+		if a < b {
+			return a
+		}
+		return b
+	}
+
+	nextDelay := func(r *http.Response, delay int64) int64 {
+		after := r.Header.Get("Retry-After")
+		if after == "" {
+			return min(delay, maxDelay)
+		}
+		logrus.Debugf("detected 'Retry-After' header %q", after)
+		// First check if we have a numerical value.
+		if num, err := strconv.ParseInt(after, 10, 64); err == nil {
+			return min(num, maxDelay)
+		}
+		// Secondly check if we have an http date.
+		// If the delta between the date and now is positive, use it.
+		// Otherwise, fall back to using the default exponential back off.
+		if t, err := http.ParseTime(after); err == nil {
+			delta := int64(t.Sub(time.Now()).Seconds())
+			if delta > 0 {
+				return min(delta, maxDelay)
+			}
+			logrus.Debugf("negative date: falling back to using %d seconds", delay)
+			return min(delay, maxDelay)
+		}
+		// If the header contains a bogus value, fall back to using the default
+		// exponential back off.
+		logrus.Debugf("invalid format: falling back to using %d seconds", delay)
+		return min(delay, maxDelay)
+	}
+
+	for i := 0; i < numIterations; i++ {
+		res, err = c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope)
+		if stream == nil && res != nil && res.StatusCode == http.StatusTooManyRequests {
+			if i < numIterations-1 {
+				logrus.Errorf("HEADER %v", res.Header)
+				delay = nextDelay(res, delay) // compute next delay - does NOT exceed maxDelay
+				logrus.Debugf("too many requests to %s: sleeping for %d seconds before next attempt", url, delay)
+				time.Sleep(time.Duration(delay) * time.Second)
+				delay = delay * 2 // exponential back off
+			}
+			continue
+		}
+		break
+	}
+	return res, err
+}
+
+// makeRequestToResolvedURLOnce creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// streamLen, if not -1, specifies the length of the data expected on stream.
+// makeRequest should generally be preferred.
+// Note that no exponential back off is performed when receiving an http 429 status code.
+func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+	req, err := http.NewRequest(method, url, stream)
+	if err != nil {
+		return nil, err
+	}
+	req = req.WithContext(ctx)
+	if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
+ req.ContentLength = streamLen + } + req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") + for n, h := range headers { + for _, hh := range h { + req.Header.Add(n, hh) + } + } + if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { + req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) + } + if auth == v2Auth { + if err := c.setupRequestAuth(req, extraScope); err != nil { + return nil, err + } + } + logrus.Debugf("%s %s", method, url) + res, err := c.client.Do(req) + if err != nil { + return nil, err + } + return res, nil +} + +// we're using the challenges from the /v2/ ping response and not the one from the destination +// URL in this request because: +// +// 1) docker does that as well +// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request +// +// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up +func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error { + if len(c.challenges) == 0 { + return nil + } + schemeNames := make([]string, 0, len(c.challenges)) + for _, challenge := range c.challenges { + schemeNames = append(schemeNames, challenge.Scheme) + switch challenge.Scheme { + case "basic": + req.SetBasicAuth(c.username, c.password) + return nil + case "bearer": + cacheKey := "" + scopes := []authScope{c.scope} + if extraScope != nil { + // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons). + cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions) + scopes = append(scopes, *extraScope) + } + var token bearerToken + t, inCache := c.tokenCache.Load(cacheKey) + if inCache { + token = t.(bearerToken) + } + if !inCache || time.Now().After(token.expirationTime) { + t, err := c.getBearerToken(req.Context(), challenge, scopes) + if err != nil { + return err + } + token = *t + c.tokenCache.Store(cacheKey, token) + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token)) + return nil + default: + logrus.Debugf("no handler for %s authentication", challenge.Scheme) + } + } + logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) + return nil +} + +func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) { + realm, ok := challenge.Parameters["realm"] + if !ok { + return nil, errors.Errorf("missing realm in bearer auth challenge") + } + + authReq, err := http.NewRequest("GET", realm, nil) + if err != nil { + return nil, err + } + authReq = authReq.WithContext(ctx) + getParams := authReq.URL.Query() + if c.username != "" { + getParams.Add("account", c.username) + } + if service, ok := challenge.Parameters["service"]; ok && service != "" { + getParams.Add("service", service) + } + for _, scope := range scopes { + if scope.remoteName != "" && scope.actions != "" { + getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) + } + } + authReq.URL.RawQuery = getParams.Encode() + if c.username != "" && c.password != "" { + authReq.SetBasicAuth(c.username, c.password) + } + logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + res, err := c.client.Do(authReq) + if err != nil { + return nil, err + } + defer res.Body.Close() + switch res.StatusCode { + case http.StatusUnauthorized: + err := 
clientLib.HandleErrorResponse(res)
+		logrus.Debugf("Server response when trying to obtain an access token: \n%q", err.Error())
+		return nil, ErrUnauthorizedForCredentials{Err: err}
+	case http.StatusOK:
+		break
+	default:
+		return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL)
+	}
+	tokenBlob, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	return newBearerTokenFromJSONBlob(tokenBlob)
+}
+
+// detectPropertiesHelper performs the work of detectProperties, which executes
+// it at most once.
+func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
+	// We overwrite the TLS client's `InsecureSkipVerify` only if explicitly
+	// specified by the system context
+	if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
+		c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
+	}
+	tr := tlsclientconfig.NewTransport()
+	tr.TLSClientConfig = c.tlsClientConfig
+	c.client = &http.Client{Transport: tr}
+
+	ping := func(scheme string) error {
+		url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
+		resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+		if err != nil {
+			logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
+			return err
+		}
+		defer resp.Body.Close()
+		logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
+			return httpResponseToError(resp)
+		}
+		c.challenges = parseAuthHeader(resp.Header)
+		c.scheme = scheme
+		c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
+		return nil
+	}
+	err := ping("https")
+	if err != nil && c.tlsClientConfig.InsecureSkipVerify {
+		err = ping("http")
+	}
+	if err != nil {
+		err = errors.Wrapf(err, "error pinging docker registry %s", c.registry)
+		if c.sys != nil && c.sys.DockerDisableV1Ping {
+			return err
+		}
+		// best effort to understand if we're talking to a V1 registry
+		pingV1 := func(scheme string) bool {
+			url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
+			resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+			if err != nil {
+				logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
+				return false
+			}
+			defer resp.Body.Close()
+			logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+			if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
+				return false
+			}
+			return true
+		}
+		isV1 := pingV1("https")
+		if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
+			isV1 = pingV1("http")
+		}
+		if isV1 {
+			err = ErrV1NotSupported
+		}
+	}
+	return err
+}
+
+// detectProperties detects various properties of the registry.
+// See the dockerClient documentation for members which are affected by this.
+func (c *dockerClient) detectProperties(ctx context.Context) error {
+	c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) })
+	return c.detectPropertiesError
+}
+
+// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
+// using the original data structures.
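As an aside, the expiring-token cache that setupRequestAuth builds on (a sync.Map keyed by scope, with values carrying an expiration time) can be sketched in isolation. Everything below is invented stand-in data, not the vendored API:

package main

import (
	"fmt"
	"sync"
	"time"
)

// cachedToken mirrors the fields of bearerToken that matter for caching.
type cachedToken struct {
	value      string
	expiration time.Time
}

func main() {
	var tokenCache sync.Map // key: scope string, value: cachedToken

	get := func(key string) cachedToken {
		if v, ok := tokenCache.Load(key); ok {
			t := v.(cachedToken)
			if time.Now().Before(t.expiration) {
				return t // still valid: reuse without a network round trip
			}
		}
		// Miss or expired: stand-in for getBearerToken's request to the realm.
		t := cachedToken{value: "fresh-token", expiration: time.Now().Add(time.Minute)}
		tokenCache.Store(key, t)
		return t
	}

	a := get("repo/name:pull")
	b := get("repo/name:pull") // second lookup is served from the cache
	fmt.Println(a.value == b.value)
}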
+func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
+	path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
+	res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode != http.StatusOK {
+		return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
+	}
+
+	body, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	var parsedBody extensionSignatureList
+	if err := json.Unmarshal(body, &parsedBody); err != nil {
+		return nil, errors.Wrapf(err, "Error decoding signature list")
+	}
+	return &parsedBody, nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
new file mode 100644
index 000000000..dad382cd0
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -0,0 +1,105 @@
+package docker
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
+// which are specific to Docker.
+type Image struct {
+	types.ImageCloser
+	src *dockerImageSource
+}
+
+// newImage returns a new Image interface type after setting up
+// a client to the registry hosting the given image.
+// The caller must call .Close() on the returned Image.
+func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
+	s, err := newImageSource(ctx, sys, ref)
+	if err != nil {
+		return nil, err
+	}
+	img, err := image.FromSource(ctx, sys, s)
+	if err != nil {
+		return nil, err
+	}
+	return &Image{ImageCloser: img, src: s}, nil
+}
+
+// SourceRefFullName returns a fully expanded name for the repository this image is in.
+func (i *Image) SourceRefFullName() string {
+	return i.src.ref.ref.Name()
+}
+
+// GetRepositoryTags lists all tags available in the repository. The tag
+// provided inside the ImageReference will be ignored. (This is a
+// backward-compatible shim method which calls the module-level
+// GetRepositoryTags)
+func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
+	return GetRepositoryTags(ctx, i.src.c.sys, i.src.ref)
+}
+
+// GetRepositoryTags lists all tags available in the repository. The tag
+// provided inside the ImageReference will be ignored.
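A caller-side sketch of the exported helper documented above may be useful; it assumes network access to a registry, and "//busybox" is only an example reference string (docker transport references start with "//"). The implementation itself follows:

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	// Parse a docker transport reference; "//busybox" is a hypothetical example.
	ref, err := docker.ParseReference("//busybox")
	if err != nil {
		panic(err)
	}
	// List the repository's tags using the function defined below.
	tags, err := docker.GetRepositoryTags(context.Background(), &types.SystemContext{}, ref)
	if err != nil {
		panic(err)
	}
	for _, tag := range tags {
		fmt.Println(tag)
	}
}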
+func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) { + dr, ok := ref.(dockerReference) + if !ok { + return nil, errors.Errorf("ref must be a dockerReference") + } + + path := fmt.Sprintf(tagsPath, reference.Path(dr.ref)) + client, err := newDockerClientFromRef(sys, dr, false, "pull") + if err != nil { + return nil, errors.Wrap(err, "failed to create client") + } + + tags := make([]string, 0) + + for { + res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := httpResponseToError(res); err != nil { + return nil, err + } + + var tagsHolder struct { + Tags []string + } + if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil { + return nil, err + } + tags = append(tags, tagsHolder.Tags...) + + link := res.Header.Get("Link") + if link == "" { + break + } + + linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") + linkURL, err := url.Parse(linkURLStr) + if err != nil { + return tags, err + } + + // can be relative or absolute, but we only want the path (and I + // guess we're in trouble if it forwards to a new place...) + path = linkURL.Path + if linkURL.RawQuery != "" { + path += "?" + path += linkURL.RawQuery + } + } + return tags, nil +} diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go new file mode 100644 index 000000000..417d97aec --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -0,0 +1,641 @@ +package docker + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/docker/distribution/registry/api/errcode" + v2 "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type dockerImageDestination struct { + ref dockerReference + c *dockerClient + // State + manifestDigest digest.Digest // or "" if not yet known. +} + +// newImageDestination creates a new ImageDestination for the specified image reference. +func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) { + c, err := newDockerClientFromRef(sys, ref, true, "pull,push") + if err != nil { + return nil, err + } + return &dockerImageDestination{ + ref: ref, + c: c, + }, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *dockerImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. 
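Returning briefly to the pagination loop inside GetRepositoryTags above: the next request path is derived from the RFC 5988 Link header roughly as in this self-contained sketch (the header value here is invented):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// A typical pagination header, as a registry might return it.
	link := `</v2/library/busybox/tags/list?last=1.31&n=50>; rel="next"`
	// Keep only the URL part between the angle brackets.
	raw := strings.Trim(strings.Split(link, ";")[0], "<> ")
	next, err := url.Parse(raw)
	if err != nil {
		panic(err)
	}
	// Whether the link is relative or absolute, only path and query are reused.
	path := next.Path
	if next.RawQuery != "" {
		path += "?" + next.RawQuery
	}
	fmt.Println(path) // /v2/library/busybox/tags/list?last=1.31&n=50
}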
+func (d *dockerImageDestination) Close() error { + return nil +} + +func (d *dockerImageDestination) SupportedManifestMIMETypes() []string { + return []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + imgspecv1.MediaTypeImageIndex, + manifest.DockerV2ListMediaType, + manifest.DockerV2Schema1SignedMediaType, + manifest.DockerV2Schema1MediaType, + } +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error { + if err := d.c.detectProperties(ctx); err != nil { + return err + } + switch { + case d.c.signatureBase != nil: + return nil + case d.c.supportsSignatures: + return nil + default: + return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured") + } +} + +func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *dockerImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. +} + +// sizeCounter is an io.Writer which only counts the total size of its input. +type sizeCounter struct{ size int64 } + +func (c *sizeCounter) Write(p []byte) (n int, err error) { + c.size += int64(len(p)) + return len(p), nil +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *dockerImageDestination) HasThreadSafePutBlob() bool { + return true +} + +// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + if inputInfo.Digest.String() != "" { + // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. 
+ // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. + // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_. + haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false) + if err != nil { + return types.BlobInfo{}, err + } + if haveBlob { + return reusedInfo, nil + } + } + + // FIXME? Chunked upload, progress reporting, etc. + uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)) + logrus.Debugf("Uploading %s", uploadPath) + res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil) + if err != nil { + return types.BlobInfo{}, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusAccepted { + logrus.Debugf("Error initiating layer upload, response %#v", *res) + return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry) + } + uploadLocation, err := res.Location() + if err != nil { + return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") + } + + digester := digest.Canonical.Digester() + sizeCounter := &sizeCounter{} + tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)) + res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil) + if err != nil { + logrus.Debugf("Error uploading layer chunked, response %#v", res) + return types.BlobInfo{}, err + } + defer res.Body.Close() + computedDigest := digester.Digest() + + uploadLocation, err = res.Location() + if err != nil { + return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") + } + + // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope) + + locationQuery := uploadLocation.Query() + // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717 + locationQuery.Set("digest", computedDigest.String()) + uploadLocation.RawQuery = locationQuery.Encode() + res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) + if err != nil { + return types.BlobInfo{}, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { + logrus.Debugf("Error uploading layer, response %#v", *res) + return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation) + } + + logrus.Debugf("Upload of layer %s complete", computedDigest) + cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref)) + return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil +} + +// blobExists returns true iff repo contains a blob with digest, and if so, also its size. +// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); +// it returns a non-nil error only on an unexpected failure. 
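The one-pass digest-and-size accounting that PutBlob uses above (a TeeReader feeding a digester and a byte counter) can be exercised standalone; the input string is invented, and ioutil.Discard stands in for the network upload:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// byteCounter plays the role of sizeCounter above.
type byteCounter struct{ size int64 }

func (c *byteCounter) Write(p []byte) (int, error) {
	c.size += int64(len(p))
	return len(p), nil
}

func main() {
	stream := strings.NewReader("pretend this is a layer blob")
	digester := digest.Canonical.Digester()
	counter := &byteCounter{}
	// Every byte read from tee also reaches the digester and the counter,
	// so a single pass yields the upload, the digest, and the size.
	tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), counter))
	if _, err := io.Copy(ioutil.Discard, tee); err != nil { // stands in for the PATCH upload
		panic(err)
	}
	fmt.Println(digester.Digest(), counter.size)
}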
+func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
+	checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
+	logrus.Debugf("Checking %s", checkPath)
+	res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
+	if err != nil {
+		return false, -1, err
+	}
+	defer res.Body.Close()
+	switch res.StatusCode {
+	case http.StatusOK:
+		logrus.Debugf("... already exists")
+		return true, getBlobSize(res), nil
+	case http.StatusUnauthorized:
+		logrus.Debugf("... not authorized")
+		return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
+	case http.StatusNotFound:
+		logrus.Debugf("... not present")
+		return false, -1, nil
+	default:
+		return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
+	}
+}
+
+// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
+	u := url.URL{
+		Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
+		RawQuery: url.Values{
+			"mount": {srcDigest.String()},
+			"from":  {reference.Path(srcRepo)},
+		}.Encode(),
+	}
+	mountPath := u.String()
+	logrus.Debugf("Trying to mount %s", mountPath)
+	res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+	switch res.StatusCode {
+	case http.StatusCreated:
+		logrus.Debugf("... mount OK")
+		return nil
+	case http.StatusAccepted:
+		// Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
+		// Abort, and let the ultimate caller do an upload when it's ready, instead.
+		// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
+		uploadLocation, err := res.Location()
+		if err != nil {
+			return errors.Wrap(err, "Error determining upload URL after a mount attempt")
+		}
+		logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
+		res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
+		if err != nil {
+			logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
+		} else {
+			defer res2.Body.Close()
+			if res2.StatusCode != http.StatusNoContent {
+				logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res2.StatusCode))
+			}
+		}
+		// Anyway, if canceling the upload fails, ignore it and return the more important error:
+		return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+	default:
+		logrus.Debugf("Error mounting, response %#v", *res)
+		return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+	}
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g.
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+	}
+
+	// First, check whether the blob happens to already exist at the destination.
+	exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	if exists {
+		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
+		return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+	}
+
+	// Then try reusing blobs from other locations.
+	for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
+		candidateRepo, err := parseBICLocationReference(candidate.Location)
+		if err != nil {
+			logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
+			continue
+		}
+		logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
+
+		// Sanity checks:
+		if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
+			logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
+			continue
+		}
+		if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+			logrus.Debug("... Already tried the primary destination")
+			continue
+		}
+
+		// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
+
+		// Checking candidateRepo, and mounting from it, requires an
+		// expanded token scope.
+		extraScope := &authScope{
+			remoteName: reference.Path(candidateRepo),
+			actions:    "pull",
+		}
+		// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
+		// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
+		// So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
+		// On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
+		// Even worse, docker/distribution does not actually reasonably implement canceling uploads
+		// (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
+		// so, be a nice client and don't create unnecessary upload sessions on the server.
+		exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
+		if err != nil {
+			logrus.Debugf("... Failed: %v", err)
+			continue
+		}
+		if !exists {
+			// FIXME? Should we drop the blob from cache here (and elsewhere?)?
+			continue // logrus.Debug() already happened in blobExists
+		}
+		if candidateRepo.Name() != d.ref.ref.Name() {
+			if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil {
+				logrus.Debugf("... Mount failed: %v", err)
+				continue
+			}
+		}
+		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
+		return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
+	}
+
+	return false, types.BlobInfo{}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// When the primary manifest is a manifest list, if instanceDigest is nil, we're saving the list
+// itself, else instanceDigest contains a digest of the specific manifest instance to overwrite the
+// manifest for; when the primary manifest is not a manifest list, instanceDigest should always be nil.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema),
+// even though it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+	refTail := ""
+	if instanceDigest != nil {
+		// If the instanceDigest is provided, then use it as the refTail, because the reference,
+		// whether it includes a tag or a digest, refers to the list as a whole, and not this
+		// particular instance.
+		refTail = instanceDigest.String()
+		// Double-check that the manifest we've been given matches the digest we've been given.
+		matches, err := manifest.MatchesDigest(m, *instanceDigest)
+		if err != nil {
+			return errors.Wrapf(err, "error digesting manifest in PutManifest")
+		}
+		if !matches {
+			manifestDigest, merr := manifest.Digest(m)
+			if merr != nil {
+				return errors.Wrapf(merr, "Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest", instanceDigest.String())
+			}
+			return errors.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
+		}
+	} else {
+		// Compute the digest of the main manifest, or the list if it's a list, so that we
+		// have a digest value to use if we're asked to save a signature for the manifest.
+		digest, err := manifest.Digest(m)
+		if err != nil {
+			return err
+		}
+		d.manifestDigest = digest
+		// The refTail should be either a digest (which we expect to match the value we just
+		// computed) or a tag name.
+		refTail, err = d.ref.tagOrDigest()
+		if err != nil {
+			return err
+		}
+	}
+
+	path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
+
+	headers := map[string][]string{}
+	mimeType := manifest.GuessMIMEType(m)
+	if mimeType != "" {
+		headers["Content-Type"] = []string{mimeType}
+	}
+	res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+	if !successStatus(res.StatusCode) {
+		err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
+		if isManifestInvalidError(errors.Cause(err)) {
+			err = types.ManifestTypeRejectedError{Err: err}
+		}
+		return err
+	}
+	return nil
+}
+
+// successStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
+func successStatus(status int) bool {
+	return status >= 200 && status <= 399
+}
+
+// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
+func isManifestInvalidError(err error) bool {
+	errors, ok := err.(errcode.Errors)
+	if !ok || len(errors) == 0 {
+		return false
+	}
+	err = errors[0]
+	ec, ok := err.(errcode.ErrorCoder)
+	if !ok {
+		return false
+	}
+
+	switch ec.ErrorCode() {
+	// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
+	case v2.ErrorCodeManifestInvalid:
+		return true
+	// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
+	// when uploading to a tag (because it can’t find a matching tag inside the manifest)
+	case v2.ErrorCodeTagInvalid:
+		return true
+	// ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when
+	// uploading an OCI manifest that is (correctly, according to the spec) missing
+	// a top-level media type. See libpod issue #1719
+	// FIXME: remove this case when ECR behavior is fixed
+	case errcode.ErrorCodeUnsupported:
+		return strings.Contains(err.Error(), "Invalid JSON syntax")
+	default:
+		return false
+	}
+}
+
+// PutSignatures uploads a set of signatures to the relevant lookaside or API extension point.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to upload the signatures for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+	// Do not fail if we don’t really need to support signatures.
+	if len(signatures) == 0 {
+		return nil
+	}
+	if instanceDigest == nil {
+		if d.manifestDigest.String() == "" {
+			// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
+			return errors.Errorf("Unknown manifest digest, can't add signatures")
+		}
+		instanceDigest = &d.manifestDigest
+	}
+
+	if err := d.c.detectProperties(ctx); err != nil {
+		return err
+	}
+	switch {
+	case d.c.signatureBase != nil:
+		return d.putSignaturesToLookaside(signatures, instanceDigest)
+	case d.c.supportsSignatures:
+		return d.putSignaturesToAPIExtension(ctx, signatures, instanceDigest)
+	default:
+		return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
+	}
+}
+
+// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in d.c.signatureBase,
+// which is not nil.
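The digest cross-check PutManifest performs above can be reproduced with the same manifest package; the manifest body here is a meaningless stand-in, not a real image manifest:

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	m := []byte(`{"schemaVersion": 2}`) // stand-in manifest body
	dgst, err := manifest.Digest(m)
	if err != nil {
		panic(err)
	}
	// The same comparison PutManifest makes when an instanceDigest is supplied.
	matches, err := manifest.MatchesDigest(m, dgst)
	if err != nil {
		panic(err)
	}
	fmt.Println(dgst, matches) // matches is true: dgst was computed from m itself
}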
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, instanceDigest *digest.Digest) error { + // FIXME? This overwrites files one at a time, definitely not atomic. + // A failure when updating signatures with a reordered copy could lose some of them. + + // Skip dealing with the manifest digest if not necessary. + if len(signatures) == 0 { + return nil + } + + // NOTE: Keep this in sync with docs/signature-protocols.md! + for i, signature := range signatures { + url := signatureStorageURL(d.c.signatureBase, *instanceDigest, i) + if url == nil { + return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") + } + err := d.putOneSignature(url, signature) + if err != nil { + return err + } + } + // Remove any other signatures, if present. + // We stop at the first missing signature; if a previous deleting loop aborted + // prematurely, this may not clean up all of them, but one missing signature + // is enough for dockerImageSource to stop looking for other signatures, so that + // is sufficient. + for i := len(signatures); ; i++ { + url := signatureStorageURL(d.c.signatureBase, *instanceDigest, i) + if url == nil { + return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") + } + missing, err := d.c.deleteOneSignature(url) + if err != nil { + return err + } + if missing { + break + } + } + + return nil +} + +// putOneSignature stores one signature to url. +// NOTE: Keep this in sync with docs/signature-protocols.md! +func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error { + switch url.Scheme { + case "file": + logrus.Debugf("Writing to %s", url.Path) + err := os.MkdirAll(filepath.Dir(url.Path), 0755) + if err != nil { + return err + } + err = ioutil.WriteFile(url.Path, signature, 0644) + if err != nil { + return err + } + return nil + + case "http", "https": + return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) + default: + return errors.Errorf("Unsupported scheme when writing signature to %s", url.String()) + } +} + +// deleteOneSignature deletes a signature from url, if it exists. +// If it successfully determines that the signature does not exist, returns (true, nil) +// NOTE: Keep this in sync with docs/signature-protocols.md! +func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) { + switch url.Scheme { + case "file": + logrus.Debugf("Deleting %s", url.Path) + err := os.Remove(url.Path) + if err != nil && os.IsNotExist(err) { + return true, nil + } + return false, err + + case "http", "https": + return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) + default: + return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String()) + } +} + +// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension. +func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + // Skip dealing with the manifest digest, or reading the old state, if not necessary. + if len(signatures) == 0 { + return nil + } + + // Because image signatures are a shared resource in Atomic Registry, the default upload + // always adds signatures. 
Eventually we should also allow removing signatures,
+	// but the X-Registry-Supports-Signatures API extension does not support that yet.
+
+	existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, *instanceDigest)
+	if err != nil {
+		return err
+	}
+	existingSigNames := map[string]struct{}{}
+	for _, sig := range existingSignatures.Signatures {
+		existingSigNames[sig.Name] = struct{}{}
+	}
+
+sigExists:
+	for _, newSig := range signatures {
+		for _, existingSig := range existingSignatures.Signatures {
+			if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+				continue sigExists
+			}
+		}
+
+		// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+		var signatureName string
+		for {
+			randBytes := make([]byte, 16)
+			n, err := rand.Read(randBytes)
+			if err != nil || n != 16 {
+				return errors.Wrapf(err, "Error generating random signature name (read %d bytes)", n)
+			}
+			signatureName = fmt.Sprintf("%s@%032x", instanceDigest.String(), randBytes)
+			if _, ok := existingSigNames[signatureName]; !ok {
+				break
+			}
+		}
+		sig := extensionSignature{
+			Version: extensionSignatureSchemaVersion,
+			Name:    signatureName,
+			Type:    extensionSignatureTypeAtomic,
+			Content: newSig,
+		}
+		body, err := json.Marshal(sig)
+		if err != nil {
+			return err
+		}
+
+		path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
+		res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
+		if err != nil {
+			return err
+		}
+		defer res.Body.Close()
+		if res.StatusCode != http.StatusCreated {
+			body, err := ioutil.ReadAll(res.Body)
+			if err == nil {
+				logrus.Debugf("Error body %s", string(body))
+			}
+			logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
+			return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
+		}
+	}
+
+	return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *dockerImageDestination) Commit(context.Context, types.UnparsedImage) error {
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
new file mode 100644
index 000000000..35beb30e5
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -0,0 +1,457 @@
+package docker
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/sysregistriesv2"
+	"github.com/containers/image/v5/types"
+	"github.com/docker/distribution/registry/client"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type dockerImageSource struct {
+	ref dockerReference
+	c   *dockerClient
+	// State
+	cachedManifest         []byte // nil if not loaded yet
+	cachedManifestMIMEType string // Only valid if cachedManifest != nil
+}
+
+// newImageSource creates a new ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
+	registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
+	if err != nil {
+		return nil, errors.Wrapf(err, "error loading registries configuration")
+	}
+	if registry == nil {
+		// No configuration was found for the provided reference, so use the
+		// equivalent of a default configuration.
+		registry = &sysregistriesv2.Registry{
+			Endpoint: sysregistriesv2.Endpoint{
+				Location: ref.ref.String(),
+			},
+			Prefix: ref.ref.String(),
+		}
+	}
+
+	primaryDomain := reference.Domain(ref.ref)
+	// Check all endpoints for the manifest availability. If we find one that does
+	// contain the image, it will be used for all future pull actions. Always try the
+	// non-mirror original location last; this both transparently handles the case
+	// of no mirrors configured, and ensures we return the error encountered when
+	// accessing the upstream location if all endpoints fail.
+	manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint")
+	pullSources, err := registry.PullSourcesFromReference(ref.ref)
+	if err != nil {
+		return nil, err
+	}
+	for _, pullSource := range pullSources {
+		logrus.Debugf("Trying to pull %q", pullSource.Reference)
+		dockerRef, err := newReference(pullSource.Reference)
+		if err != nil {
+			return nil, err
+		}
+
+		endpointSys := sys
+		// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
+		if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(dockerRef.ref) != primaryDomain {
+			copy := *endpointSys
+			copy.DockerAuthConfig = nil
+			endpointSys = &copy
+		}
+
+		client, err := newDockerClientFromRef(endpointSys, dockerRef, false, "pull")
+		if err != nil {
+			return nil, err
+		}
+		client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
+
+		testImageSource := &dockerImageSource{
+			ref: dockerRef,
+			c:   client,
+		}
+
+		manifestLoadErr = testImageSource.ensureManifestIsLoaded(ctx)
+		if manifestLoadErr == nil {
+			return testImageSource, nil
+		}
+	}
+	return nil, manifestLoadErr
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *dockerImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *dockerImageSource) Close() error {
+	return nil
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
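The credential-stripping pattern used for mirrors in newImageSource above is a plain shallow copy of the SystemContext; a minimal sketch with invented credentials:

package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{
		DockerAuthConfig: &types.DockerAuthConfig{Username: "user", Password: "secret"},
	}
	// Shallow-copy the context and drop the credentials before contacting
	// a mirror whose domain differs from the primary endpoint.
	mirrorSys := *sys
	mirrorSys.DockerAuthConfig = nil
	fmt.Println(sys.DockerAuthConfig != nil, mirrorSys.DockerAuthConfig == nil) // true true
}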
+func (s *dockerImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} + +// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1) +// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string. +func simplifyContentType(contentType string) string { + if contentType == "" { + return contentType + } + mimeType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return "" + } + return mimeType +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return s.fetchManifest(ctx, instanceDigest.String()) + } + err := s.ensureManifestIsLoaded(ctx) + if err != nil { + return nil, "", err + } + return s.cachedManifest, s.cachedManifestMIMEType, nil +} + +func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { + path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest) + headers := map[string][]string{ + "Accept": manifest.DefaultRequestedManifestMIMETypes, + } + res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil) + if err != nil { + return nil, "", err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name()) + } + manblob, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, "", err + } + return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil +} + +// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType +// +// ImageSource implementations are not required or expected to do any caching, +// but because our signatures are “attached” to the manifest digest, +// we need to ensure that the digest of the manifest returned by GetManifest(ctx, nil) +// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious +// signature verification failures when pulling while a tag is being updated. +func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { + if s.cachedManifest != nil { + return nil + } + + reference, err := s.ref.tagOrDigest() + if err != nil { + return err + } + + manblob, mt, err := s.fetchManifest(ctx, reference) + if err != nil { + return err + } + // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors. 
+ s.cachedManifest = manblob + s.cachedManifestMIMEType = mt + return nil +} + +func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + var ( + resp *http.Response + err error + ) + for _, url := range urls { + resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) + if err == nil { + if resp.StatusCode != http.StatusOK { + err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode)) + logrus.Debug(err) + continue + } + break + } + } + if err != nil { + return nil, 0, err + } + return resp.Body, getBlobSize(resp), nil +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *dockerImageSource) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if len(info.URLs) != 0 { + return s.getExternalBlob(ctx, info.URLs) + } + + path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String()) + logrus.Debugf("Downloading %s", path) + res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) + if err != nil { + return nil, 0, err + } + if err := httpResponseToError(res); err != nil { + return nil, 0, err + } + cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref)) + return res.Body, getBlobSize(res), nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if err := s.c.detectProperties(ctx); err != nil { + return nil, err + } + switch { + case s.c.signatureBase != nil: + return s.getSignaturesFromLookaside(ctx, instanceDigest) + case s.c.supportsSignatures: + return s.getSignaturesFromAPIExtension(ctx, instanceDigest) + default: + return [][]byte{}, nil + } +} + +// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, +// or finally, from a fetched manifest. 
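The cache parameter of GetBlob ties into the RecordKnownLocation call above. A minimal sketch, assuming the caller already holds an open ImageSource, of streaming one layer blob through GetBlob with the default blob-info cache:

package main

import (
	"context"
	"io"

	"github.com/containers/image/v5/pkg/blobinfocache"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// copyBlob is a sketch: it streams one blob from src into w. The cache lets
// the source record where the blob was actually found.
func copyBlob(ctx context.Context, src types.ImageSource, d digest.Digest, w io.Writer) (int64, error) {
	cache := blobinfocache.DefaultCache(nil) // nil SystemContext: default cache location
	rc, size, err := src.GetBlob(ctx, types.BlobInfo{Digest: d, Size: -1}, cache)
	if err != nil {
		return 0, err
	}
	defer rc.Close()
	_ = size // may be -1 when the registry does not report Content-Length
	return io.Copy(w, rc)
}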
+func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { + if instanceDigest != nil { + return *instanceDigest, nil + } + if digested, ok := s.ref.ref.(reference.Digested); ok { + d := digested.Digest() + if d.Algorithm() == digest.Canonical { + return d, nil + } + } + if err := s.ensureManifestIsLoaded(ctx); err != nil { + return "", err + } + return manifest.Digest(s.cachedManifest) +} + +// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase, +// which is not nil. +func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) + if err != nil { + return nil, err + } + + // NOTE: Keep this in sync with docs/signature-protocols.md! + signatures := [][]byte{} + for i := 0; ; i++ { + url := signatureStorageURL(s.c.signatureBase, manifestDigest, i) + if url == nil { + return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") + } + signature, missing, err := s.getOneSignature(ctx, url) + if err != nil { + return nil, err + } + if missing { + break + } + signatures = append(signatures, signature) + } + return signatures, nil +} + +// getOneSignature downloads one signature from url. +// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil. +// NOTE: Keep this in sync with docs/signature-protocols.md! +func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) { + switch url.Scheme { + case "file": + logrus.Debugf("Reading %s", url.Path) + sig, err := ioutil.ReadFile(url.Path) + if err != nil { + if os.IsNotExist(err) { + return nil, true, nil + } + return nil, false, err + } + return sig, false, nil + + case "http", "https": + logrus.Debugf("GET %s", url) + req, err := http.NewRequest("GET", url.String(), nil) + if err != nil { + return nil, false, err + } + req = req.WithContext(ctx) + res, err := s.c.client.Do(req) + if err != nil { + return nil, false, err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return nil, true, nil + } else if res.StatusCode != http.StatusOK { + return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode)) + } + sig, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, false, err + } + return sig, false, nil + + default: + return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String()) + } +} + +// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension. +func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) + if err != nil { + return nil, err + } + + parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest) + if err != nil { + return nil, err + } + + var sigs [][]byte + for _, sig := range parsedBody.Signatures { + if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { + sigs = append(sigs, sig.Content) + } + } + return sigs, nil +} + +// deleteImage deletes the named image from the registry, if supported. 
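A minimal sketch of the caller side of the dispatch above; GetSignatures itself picks the lookaside or API-extension path after detectProperties(), so consumers only ever see a slice of signature blobs:

package main

import (
	"context"

	"github.com/containers/image/v5/types"
)

// countSignatures is a sketch: it reads all signatures attached to the
// primary manifest, regardless of which storage mechanism served them.
func countSignatures(ctx context.Context, src types.ImageSource) (int, error) {
	sigs, err := src.GetSignatures(ctx, nil) // nil: signatures of the primary manifest
	if err != nil {
		return 0, err
	}
	return len(sigs), nil
}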
+func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
+	// docker/distribution does not document what action should be used for deleting images.
+	//
+	// Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it.
+	// quay.io requires "push" (an explicit "pull" is unnecessary), and does not grant any token (fails parsing the request) if "delete" is included.
+	// OpenShift ignores the action string (both the password and the token are OpenShift API tokens identifying a user).
+	//
+	// We have to hard-code a single string; luckily, both docker/distribution and quay.io support "*" to mean "everything".
+	c, err := newDockerClientFromRef(sys, ref, true, "*")
+	if err != nil {
+		return err
+	}
+
+	headers := map[string][]string{
+		"Accept": manifest.DefaultRequestedManifestMIMETypes,
+	}
+	refTail, err := ref.tagOrDigest()
+	if err != nil {
+		return err
+	}
+	getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
+	get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil)
+	if err != nil {
+		return err
+	}
+	defer get.Body.Close()
+	manifestBody, err := ioutil.ReadAll(get.Body)
+	if err != nil {
+		return err
+	}
+	switch get.StatusCode {
+	case http.StatusOK:
+	case http.StatusNotFound:
+		return errors.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
+	default:
+		return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
+	}
+
+	digest := get.Header.Get("Docker-Content-Digest")
+	deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest)
+
+	// When retrieving the digest from a registry >= 2.3 use the following header:
+	//   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
+	delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil)
+	if err != nil {
+		return err
+	}
+	defer delete.Body.Close()
+
+	body, err := ioutil.ReadAll(delete.Body)
+	if err != nil {
+		return err
+	}
+	if delete.StatusCode != http.StatusAccepted {
+		return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
+	}
+
+	if c.signatureBase != nil {
+		manifestDigest, err := manifest.Digest(manifestBody)
+		if err != nil {
+			return err
+		}
+
+		for i := 0; ; i++ {
+			url := signatureStorageURL(c.signatureBase, manifestDigest, i)
+			if url == nil {
+				return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+			}
+			missing, err := c.deleteOneSignature(url)
+			if err != nil {
+				return err
+			}
+			if missing {
+				break
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go
new file mode 100644
index 000000000..8b8e57968
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go
@@ -0,0 +1,168 @@
+package docker
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v5/docker/policyconfiguration"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for Docker registry-hosted images.
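The init/Register pair above is what makes "docker://" names resolvable at runtime. A minimal sketch of looking the transport back up by name (the quay.io reference is a placeholder):

package main

import (
	"fmt"

	_ "github.com/containers/image/v5/docker" // init() registers the "docker" transport
	"github.com/containers/image/v5/transports"
)

func main() {
	t := transports.Get("docker") // returns nil if the transport was never registered
	ref, err := t.ParseReference("//quay.io/libpod/alpine:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(transports.ImageName(ref)) // "docker://quay.io/libpod/alpine:latest"
}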
+var Transport = dockerTransport{}
+
+type dockerTransport struct{}
+
+func (t dockerTransport) Name() string {
+	return "docker"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be ""; that value is always allowed.
+func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// FIXME? We could be verifying the various character set and length restrictions
+	// from docker/distribution/reference.regexp.go, but other than that there
+	// are few semantically invalid strings.
+	return nil
+}
+
+// dockerReference is an ImageReference for Docker images.
+type dockerReference struct {
+	ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+	if !strings.HasPrefix(refString, "//") {
+		return nil, errors.Errorf("docker: image reference %s does not start with //", refString)
+	}
+	ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
+	if err != nil {
+		return nil, err
+	}
+	ref = reference.TagNameOnly(ref)
+	return NewReference(ref)
+}
+
+// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
+func NewReference(ref reference.Named) (types.ImageReference, error) {
+	return newReference(ref)
+}
+
+// newReference returns a dockerReference for a named reference.
+func newReference(ref reference.Named) (dockerReference, error) {
+	if reference.IsNameOnly(ref) {
+		return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+	}
+	// A github.com/distribution/reference value can have a tag and a digest at the same time!
+	// The docker/distribution API does not really support that (we can’t ask for an image with a specific
+	// tag and digest), so fail. This MAY be accepted in the future.
+	// (Even if it were supported, the semantics of policy namespaces are unclear - should we drop
+	// the tag or the digest first?)
+	_, isTagged := ref.(reference.NamedTagged)
+	_, isDigested := ref.(reference.Canonical)
+	if isTagged && isDigested {
+		return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported")
+	}
+
+	return dockerReference{
+		ref: ref,
+	}, nil
+}
+
+func (ref dockerReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
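A short sketch of the parsing rules above: the "//" prefix is mandatory, a missing tag is defaulted via TagNameOnly, and tag-plus-digest references are rejected by newReference (the digest below is a dummy value for illustration):

package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/v5/docker"
)

func main() {
	ref, err := docker.ParseReference("//busybox") // tag defaulted via TagNameOnly
	fmt.Println(ref.DockerReference().String(), err) // "docker.io/library/busybox:latest" <nil>

	_, err = docker.ParseReference("busybox") // missing the "//" prefix
	fmt.Println(err)

	dummy := "sha256:" + strings.Repeat("a", 64) // dummy digest, illustration only
	_, err = docker.ParseReference("//busybox:1.36@" + dummy)
	fmt.Println(err) // tag+digest references are rejected by newReference
}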
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref dockerReference) StringWithinTransport() string { + return "//" + reference.FamiliarString(ref.ref) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref dockerReference) DockerReference() reference.Named { + return ref.ref +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref dockerReference) PolicyConfigurationIdentity() string { + res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) + if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref dockerReference) PolicyConfigurationNamespaces() []string { + return policyconfiguration.DockerReferenceNamespaces(ref.ref) +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + return newImage(ctx, sys, ref) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. 
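A minimal sketch of what the two policy methods above produce for a concrete reference (quay.io/libpod/alpine is a placeholder name); note how each namespace is a prefix of the one before it, as the contract requires:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
)

func main() {
	ref, _ := docker.ParseReference("//quay.io/libpod/alpine:latest")
	fmt.Println(ref.PolicyConfigurationIdentity())
	// "quay.io/libpod/alpine:latest"
	for _, ns := range ref.PolicyConfigurationNamespaces() {
		fmt.Println(ns)
	}
	// "quay.io/libpod/alpine", "quay.io/libpod", "quay.io"
}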
+func (ref dockerReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref dockerReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return deleteImage(ctx, sys, ref)
+}
+
+// tagOrDigest returns a tag or digest from the reference.
+func (ref dockerReference) tagOrDigest() (string, error) {
+	if ref, ok := ref.ref.(reference.Canonical); ok {
+		return ref.Digest().String(), nil
+	}
+	if ref, ok := ref.ref.(reference.NamedTagged); ok {
+		return ref.Tag(), nil
+	}
+	// This should not happen, NewReference above refuses reference.IsNameOnly values.
+	return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
+}
diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go
new file mode 100644
index 000000000..860868f41
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/errors.go
@@ -0,0 +1,43 @@
+package docker
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/docker/distribution/registry/client"
+	perrors "github.com/pkg/errors"
+)
+
+var (
+	// ErrV1NotSupported is returned when we're trying to talk to a
+	// docker V1 registry.
+	ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
+	// ErrTooManyRequests is returned when the status code returned is 429
+	ErrTooManyRequests = errors.New("too many requests to registry")
+)
+
+// ErrUnauthorizedForCredentials is returned when the status code returned is 401
+type ErrUnauthorizedForCredentials struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+	Err error
+}
+
+func (e ErrUnauthorizedForCredentials) Error() string {
+	return fmt.Sprintf("unable to retrieve auth token: invalid username/password: %s", e.Err.Error())
+}
+
+// httpResponseToError translates the http.Response into an error. It returns
+// nil if the response is not considered an error.
+func httpResponseToError(res *http.Response) error {
+	switch res.StatusCode {
+	case http.StatusOK:
+		return nil
+	case http.StatusTooManyRequests:
+		return ErrTooManyRequests
+	case http.StatusUnauthorized:
+		err := client.HandleErrorResponse(res)
+		return ErrUnauthorizedForCredentials{Err: err}
+	default:
+		return perrors.Errorf("invalid status code from registry %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+	}
+}
diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go
new file mode 100644
index 000000000..918c0f838
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/lookaside.go
@@ -0,0 +1,202 @@
+package docker
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/types"
+	"github.com/ghodss/yaml"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
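Because ErrUnauthorizedForCredentials is a struct carrying an inner error, equality checks cannot match it. A minimal sketch of telling the error shapes defined above apart:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
)

// classify is a sketch: a type assertion (not an equality check) detects the
// 401 case, since each ErrUnauthorizedForCredentials wraps its own inner error.
func classify(err error) string {
	if err == docker.ErrTooManyRequests { // plain sentinel value: equality works
		return "rate limited"
	}
	if _, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
		return "bad credentials"
	}
	return fmt.Sprintf("other: %v", err)
}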
+// You can override this at build time with +// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path' +var systemRegistriesDirPath = builtinRegistriesDirPath + +// builtinRegistriesDirPath is the path to registries.d. +// DO NOT change this, instead see systemRegistriesDirPath above. +const builtinRegistriesDirPath = "/etc/containers/registries.d" + +// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. +// NOTE: Keep this in sync with docs/registries.d.md! +type registryConfiguration struct { + DefaultDocker *registryNamespace `json:"default-docker"` + // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), + Docker map[string]registryNamespace `json:"docker"` +} + +// registryNamespace defines lookaside locations for a single namespace. +type registryNamespace struct { + SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing. + SigStoreStaging string `json:"sigstore-staging"` // For writing only. +} + +// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage. +// Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below. +type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported. + +// configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”. +func configuredSignatureStorageBase(sys *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) { + // FIXME? Loading and parsing the config could be cached across calls. + dirPath := registriesDirPath(sys) + logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath) + config, err := loadAndMergeConfig(dirPath) + if err != nil { + return nil, err + } + + topLevel := config.signatureTopLevel(ref, write) + if topLevel == "" { + return nil, nil + } + + url, err := url.Parse(topLevel) + if err != nil { + return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel) + } + // NOTE: Keep this in sync with docs/signature-protocols.md! + // FIXME? Restrict to explicitly supported schemes? + repo := reference.Path(ref.ref) // Note that this is without a tag or digest. 
+ if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references + return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String()) + } + url.Path = url.Path + "/" + repo + return url, nil +} + +// registriesDirPath returns a path to registries.d +func registriesDirPath(sys *types.SystemContext) string { + if sys != nil { + if sys.RegistriesDirPath != "" { + return sys.RegistriesDirPath + } + if sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath) + } + } + return systemRegistriesDirPath +} + +// loadAndMergeConfig loads configuration files in dirPath +func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { + mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} + dockerDefaultMergedFrom := "" + nsMergedFrom := map[string]string{} + + dir, err := os.Open(dirPath) + if err != nil { + if os.IsNotExist(err) { + return &mergedConfig, nil + } + return nil, err + } + configNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + for _, configName := range configNames { + if !strings.HasSuffix(configName, ".yaml") { + continue + } + configPath := filepath.Join(dirPath, configName) + configBytes, err := ioutil.ReadFile(configPath) + if err != nil { + return nil, err + } + + var config registryConfiguration + err = yaml.Unmarshal(configBytes, &config) + if err != nil { + return nil, errors.Wrapf(err, "Error parsing %s", configPath) + } + + if config.DefaultDocker != nil { + if mergedConfig.DefaultDocker != nil { + return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, + dockerDefaultMergedFrom, configPath) + } + mergedConfig.DefaultDocker = config.DefaultDocker + dockerDefaultMergedFrom = configPath + } + + for nsName, nsConfig := range config.Docker { // includes config.Docker == nil + if _, ok := mergedConfig.Docker[nsName]; ok { + return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, + nsName, nsMergedFrom[nsName], configPath) + } + mergedConfig.Docker[nsName] = nsConfig + nsMergedFrom[nsName] = configPath + } + } + + return &mergedConfig, nil +} + +// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”. +// (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used. +func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string { + if config.Docker != nil { + // Look for a full match. + identity := ref.PolicyConfigurationIdentity() + if ns, ok := config.Docker[identity]; ok { + logrus.Debugf(` Using "docker" namespace %s`, identity) + if url := ns.signatureTopLevel(write); url != "" { + return url + } + } + + // Look for a match of the possible parent namespaces. 
+		for _, name := range ref.PolicyConfigurationNamespaces() {
+			if ns, ok := config.Docker[name]; ok {
+				logrus.Debugf(` Using "docker" namespace %s`, name)
+				if url := ns.signatureTopLevel(write); url != "" {
+					return url
+				}
+			}
+		}
+	}
+	// Look for a default location
+	if config.DefaultDocker != nil {
+		logrus.Debugf(` Using "default-docker" configuration`)
+		if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
+			return url
+		}
+	}
+	logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity())
+	return ""
+}
+
+// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”,
+// or "" if nothing has been configured.
+func (ns registryNamespace) signatureTopLevel(write bool) string {
+	if write && ns.SigStoreStaging != "" {
+		logrus.Debugf(` Using %s`, ns.SigStoreStaging)
+		return ns.SigStoreStaging
+	}
+	if ns.SigStore != "" {
+		logrus.Debugf(` Using %s`, ns.SigStore)
+		return ns.SigStore
+	}
+	return ""
+}
+
+// signatureStorageURL returns an URL usable for accessing the signature index in base with known manifestDigest, or nil if not applicable.
+// Returns nil iff base == nil.
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
+	if base == nil {
+		return nil
+	}
+	url := *base
+	url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
+	return &url
+}
diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
new file mode 100644
index 000000000..61d9aab9a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
@@ -0,0 +1,56 @@
+package policyconfiguration
+
+import (
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/pkg/errors"
+)
+
+// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
+// as a backend for ImageReference.PolicyConfigurationIdentity.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceIdentity(ref reference.Named) (string, error) {
+	res := ref.Name()
+	tagged, isTagged := ref.(reference.NamedTagged)
+	digested, isDigested := ref.(reference.Canonical)
+	switch {
+	case isTagged && isDigested: // Note that this CAN actually happen.
+		return "", errors.Errorf("Unexpected Docker reference %s with both a tag and a digest", reference.FamiliarString(ref))
+	case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
+		return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
+	case isTagged:
+		res = res + ":" + tagged.Tag()
+	case isDigested:
+		res = res + "@" + digested.Digest().String()
+	default: // Coverage: The above was supposed to be exhaustive.
+		return "", errors.New("Internal inconsistency, unexpected default branch")
+	}
+	return res, nil
+}
+
+// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search,
+// as a backend for ImageReference.PolicyConfigurationNamespaces.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceNamespaces(ref reference.Named) []string {
+	// Look for a match of the repository, and then of the possible parent
+	// namespaces.
Note that this only happens on the expanded host names + // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox", + // then in its parent "docker.io/library"; in none of "busybox", + // un-namespaced "library" nor in "" supposedly implicitly representing "library/". + // + // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last + // iteration matches the host name (for any namespace). + res := []string{} + name := ref.Name() + for { + res = append(res, name) + + lastSlash := strings.LastIndex(name, "/") + if lastSlash == -1 { + break + } + name = name[:lastSlash] + } + return res +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/README.md b/vendor/github.com/containers/image/v5/docker/reference/README.md new file mode 100644 index 000000000..3c4d74eb4 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/README.md @@ -0,0 +1,2 @@ +This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, +except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file diff --git a/vendor/github.com/containers/image/v5/docker/reference/helpers.go b/vendor/github.com/containers/image/v5/docker/reference/helpers.go new file mode 100644 index 000000000..978df7eab --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/normalize.go b/vendor/github.com/containers/image/v5/docker/reference/normalize.go new file mode 100644 index 000000000..6a86ec64f --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/normalize.go @@ -0,0 +1,181 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". 
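A minimal sketch of the normalization round-trip described above, using this package's exported helpers:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())                           // "docker.io/library/ubuntu"
	fmt.Println(reference.FamiliarString(named))          // "ubuntu"
	fmt.Println(reference.TagNameOnly(named).String())    // "docker.io/library/ubuntu:latest"
}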
+type normalizedNamed interface {
+	Named
+	Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier,
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+	}
+	domain, remainder := splitDockerDomain(s)
+	var remoteName string
+	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+		remoteName = remainder[:tagSep]
+	} else {
+		remoteName = remainder
+	}
+	if strings.ToLower(remoteName) != remoteName {
+		return nil, errors.New("invalid reference format: repository name must be lowercase")
+	}
+
+	ref, err := Parse(domain + "/" + remainder)
+	if err != nil {
+		return nil, err
+	}
+	named, isNamed := ref.(Named)
+	if !isNamed {
+		return nil, fmt.Errorf("reference %s has no name", ref.String())
+	}
+	return named, nil
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. If the reference contains both a tag
+// and a digest, the function returns the digested reference; e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+	named, err := ParseNormalizedNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := named.(NamedTagged); ok {
+		if canonical, ok := named.(Canonical); ok {
+			// The reference is both tagged and digested, only
+			// return digested.
+			newNamed, err := WithName(canonical.Name())
+			if err != nil {
+				return nil, err
+			}
+			newCanonical, err := WithDigest(newNamed, canonical.Digest())
+			if err != nil {
+				return nil, err
+			}
+			return newCanonical, nil
+		}
+	}
+	return TagNameOnly(named), nil
+}
+
+// splitDockerDomain splits a repository name to domain and remotename string.
+// If no valid domain is found, the default domain is used. Repository name
+// needs to be already validated before.
+func splitDockerDomain(name string) (domain, remainder string) {
+	i := strings.IndexRune(name, '/')
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+		domain, remainder = defaultDomain, name
+	} else {
+		domain, remainder = name[:i], name[i+1:]
+	}
+	if domain == legacyDefaultDomain {
+		domain = defaultDomain
+	}
+	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+		remainder = officialRepoName + "/" + remainder
+	}
+	return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
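A minimal sketch of ParseDockerRef on the tag-plus-digest example given in its comment above: the tag is dropped and the digested form is returned.

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// The digest below is the example value from the ParseDockerRef comment.
	in := "docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa"
	named, err := reference.ParseDockerRef(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())
	// "docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa"
}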
+func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go new file mode 100644 index 000000000..8c0c23b2f --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. 
+	ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+	ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+	ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+	// ErrNameEmpty is returned for empty, invalid repository names.
+	ErrNameEmpty = errors.New("repository name must have at least one component")
+
+	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+	// ErrNameNotCanonical is returned when a name is not canonical.
+	ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+	// String returns the full reference
+	String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+	reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+	return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+	return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+	return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+	r, err := Parse(string(p))
+	if err != nil {
+		return err
+	}
+
+	f.reference = r
+	return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+	Reference
+	Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+	Reference
+	Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+	Named
+	Tag() string
+}
+
+// Digested is an object which has a digest
+// by which it can be referenced.
+type Digested interface {
+	Reference
+	Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name including a name with domain and digest
+type Canonical interface {
+	Named
+	Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
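A minimal sketch of the interface taxonomy above: consumers discover what a parsed Reference carries through type assertions, in the same way IsNameOnly and getBestReferenceType (further below) do.

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

// describe is a sketch: it classifies a reference by asserting the
// NamedTagged and Canonical interfaces defined above.
func describe(ref reference.Reference) string {
	_, isTagged := ref.(reference.NamedTagged)
	_, isDigested := ref.(reference.Canonical)
	switch {
	case isTagged && isDigested:
		return "name:tag@digest"
	case isTagged:
		return "name:tag"
	case isDigested:
		return "name@digest"
	default:
		return "name only, or a bare digest"
	}
}

func main() {
	ref, _ := reference.Parse("docker.io/library/busybox:1.36")
	fmt.Println(describe(ref)) // "name:tag"
}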
+type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. 
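A minimal sketch of the Domain/Path split and of WithTag validation described above (the repository name is a placeholder):

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	named, _ := reference.ParseNormalizedNamed("quay.io/libpod/alpine")
	fmt.Println(reference.Domain(named)) // "quay.io"
	fmt.Println(reference.Path(named))   // "libpod/alpine"

	tagged, _ := reference.WithTag(named, "3.10") // tag validated against anchoredTagRegexp
	fmt.Println(tagged.String())                  // "quay.io/libpod/alpine:3.10"
}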
+func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. +func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp.go b/vendor/github.com/containers/image/v5/docker/reference/regexp.go new file mode 100644 index 000000000..786034932 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/regexp.go @@ -0,0 +1,143 @@ +package 
reference
+
+import "regexp"
+
+var (
+	// alphaNumericRegexp defines the alpha numeric atom, typically a
+	// component of names. This only allows lower case characters and digits.
+	alphaNumericRegexp = match(`[a-z0-9]+`)
+
+	// separatorRegexp defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscores, and multiple
+	// dashes.
+	separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+	// nameComponentRegexp restricts registry path component names to start
+	// with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscores, and multiple dashes.
+	nameComponentRegexp = expression(
+		alphaNumericRegexp,
+		optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+	// domainComponentRegexp restricts the registry domain component of a
+	// repository name to start with a component as defined by DomainRegexp
+	// and followed by an optional port.
+	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+	// DomainRegexp defines the structure of potential domain components
+	// that may be part of image names. This is purposely a subset of what is
+	// allowed by DNS to ensure backwards compatibility with Docker image
+	// names.
+	DomainRegexp = expression(
+		domainComponentRegexp,
+		optional(repeated(literal(`.`), domainComponentRegexp)),
+		optional(literal(`:`), match(`[0-9]+`)))
+
+	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+	TagRegexp = match(`[\w][\w.-]{0,127}`)
+
+	// anchoredTagRegexp matches valid tag names, anchored at the start and
+	// end of the matched string.
+	anchoredTagRegexp = anchored(TagRegexp)
+
+	// DigestRegexp matches valid digests.
+	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+
+	// anchoredDigestRegexp matches valid digests, anchored at the start and
+	// end of the matched string.
+	anchoredDigestRegexp = anchored(DigestRegexp)
+
+	// NameRegexp is the format for the name component of references. The
+	// regexp has capturing groups for the domain and name part omitting
+	// the separating forward slash from either.
+	NameRegexp = expression(
+		optional(DomainRegexp, literal(`/`)),
+		nameComponentRegexp,
+		optional(repeated(literal(`/`), nameComponentRegexp)))
+
+	// anchoredNameRegexp is used to parse a name value, capturing the
+	// domain and trailing components.
+	anchoredNameRegexp = anchored(
+		optional(capture(DomainRegexp), literal(`/`)),
+		capture(nameComponentRegexp,
+			optional(repeated(literal(`/`), nameComponentRegexp))))
+
+	// ReferenceRegexp is the full supported format of a reference. The regexp
+	// is anchored and has capturing groups for name, tag, and digest
+	// components.
+	ReferenceRegexp = anchored(capture(NameRegexp),
+		optional(literal(":"), capture(TagRegexp)),
+		optional(literal("@"), capture(DigestRegexp)))
+
+	// IdentifierRegexp is the format for string identifier used as a
+	// content addressable identifier using sha256. These identifiers
+	// are like digests without the algorithm, since sha256 is used.
+	IdentifierRegexp = match(`([a-f0-9]{64})`)
+
+	// ShortIdentifierRegexp is the format used to represent a prefix
+	// of an identifier. A prefix may be used to match a sha256 identifier
+	// within a list of trusted identifiers.
+	ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
+
+	// anchoredIdentifierRegexp is used to check or match an
+	// identifier value, anchored at start and end of string.
+ anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go new file mode 100644 index 000000000..b02c60bb3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go @@ -0,0 +1,415 @@ +package tarfile + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. +type Destination struct { + writer io.Writer + tar *tar.Writer + repoTags []reference.NamedTagged + // Other state. + blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs + config []byte +} + +// NewDestination returns a tarfile.Destination for the specified io.Writer. +func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination { + repoTags := []reference.NamedTagged{} + if ref != nil { + repoTags = append(repoTags, ref) + } + return &Destination{ + writer: dest, + tar: tar.NewWriter(dest), + repoTags: repoTags, + blobs: make(map[digest.Digest]types.BlobInfo), + } +} + +// AddRepoTags adds the specified tags to the destination's repoTags. +func (d *Destination) AddRepoTags(tags []reference.NamedTagged) { + d.repoTags = append(d.repoTags, tags...) 
+}
+
+// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+// If an empty slice or nil is returned, then any MIME type can be tried to upload.
+func (d *Destination) SupportedManifestMIMETypes() []string {
+	return []string{
+		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *Destination) SupportsSignatures(ctx context.Context) error {
+	return errors.Errorf("Storing signatures for docker tar files is not supported")
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
+// uploaded to the image destination, true otherwise.
+func (d *Destination) AcceptsForeignLayerURLs() bool {
+	return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *Destination) MustMatchRuntimeOS() bool {
+	return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *Destination) IgnoresEmbeddedDockerReference() bool {
+	return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *Destination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes the contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	// Ouch, we need to stream the blob into a temporary file just to determine its size.
+	// When the layer is decompressed, we also have to generate the digest on the uncompressed data.
+	if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
+		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
+		streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob")
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		defer os.Remove(streamCopy.Name())
+		defer streamCopy.Close()
+
+		digester := digest.Canonical.Digester()
+		tee := io.TeeReader(stream, digester.Hash())
+		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
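A minimal sketch of how the TODO above could be satisfied, interposing a reader that checks ctx.Done() between reads. ctxReader and copyWithContext are hypothetical helper names, not part of this change; only the standard library is assumed:

    // ctxReader aborts a copy as soon as its context is cancelled.
    type ctxReader struct {
        ctx context.Context
        r   io.Reader
    }

    func (c ctxReader) Read(p []byte) (int, error) {
        if err := c.ctx.Err(); err != nil {
            return 0, err // context cancelled or deadline exceeded
        }
        return c.r.Read(p)
    }

    // copyWithContext behaves like io.Copy, but stops between reads once ctx is done.
    func copyWithContext(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
        return io.Copy(dst, ctxReader{ctx: ctx, r: src})
    }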
+		size, err := io.Copy(streamCopy, tee)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		_, err = streamCopy.Seek(0, os.SEEK_SET)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
+		if inputInfo.Digest == "" {
+			inputInfo.Digest = digester.Digest()
+		}
+		stream = streamCopy
+		logrus.Debugf("... streaming done")
+	}
+
+	// Maybe the blob has already been sent
+	ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if ok {
+		return reusedInfo, nil
+	}
+
+	if isConfig {
+		buf, err := ioutil.ReadAll(stream)
+		if err != nil {
+			return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
+		}
+		d.config = buf
+		if err := d.sendFile(inputInfo.Digest.Hex()+".json", inputInfo.Size, bytes.NewReader(buf)); err != nil {
+			return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
+		}
+	} else {
+		// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
+		// writeLegacyLayerMetadata constructs layer IDs differently from inputInfo.Digest values (as described
+		// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
+		// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
+		// in the root of the tarball.
+		if err := d.sendFile(inputInfo.Digest.Hex()+".tar", inputInfo.Size, stream); err != nil {
+			return types.BlobInfo{}, err
+		}
+	}
+	d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}
+	return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
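TryReusingBlob below implements the generic types.ImageDestination reuse protocol. A hedged sketch of the caller side of that protocol; putBlobOnce is an invented name, and PutBlob above already performs this check internally, so this only illustrates the contract:

    func putBlobOnce(ctx context.Context, d *Destination, stream io.Reader, info types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
        if info.Digest != "" {
            ok, reused, err := d.TryReusingBlob(ctx, info, cache, false)
            if err != nil {
                return types.BlobInfo{}, err
            }
            if ok {
                return reused, nil // blob already written to this tarball, skip the upload
            }
        }
        return d.PutBlob(ctx, stream, info, cache, isConfig)
    }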
+func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+	}
+	if blob, ok := d.blobs[info.Digest]; ok {
+		return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
+	}
+	return false, types.BlobInfo{}, nil
+}
+
+func (d *Destination) createRepositoriesFile(rootLayerID string) error {
+	repositories := map[string]map[string]string{}
+	for _, repoTag := range d.repoTags {
+		if val, ok := repositories[repoTag.Name()]; ok {
+			val[repoTag.Tag()] = rootLayerID
+		} else {
+			repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): rootLayerID}
+		}
+	}
+
+	b, err := json.Marshal(repositories)
+	if err != nil {
+		return errors.Wrap(err, "Error marshaling repositories")
+	}
+	if err := d.sendBytes(legacyRepositoriesFileName, b); err != nil {
+		return errors.Wrap(err, "Error writing config json file")
+	}
+	return nil
+}
+
+// PutManifest writes the manifest to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+	if instanceDigest != nil {
+		return errors.New(`Manifest lists are not supported for docker tar files`)
+	}
+	// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
+	// so the caller trying a different manifest kind would be pointless.
+	var man manifest.Schema2
+	if err := json.Unmarshal(m, &man); err != nil {
+		return errors.Wrap(err, "Error parsing manifest")
+	}
+	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
+		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+	}
+
+	layerPaths, lastLayerID, err := d.writeLegacyLayerMetadata(man.LayersDescriptors)
+	if err != nil {
+		return err
+	}
+
+	if len(man.LayersDescriptors) > 0 {
+		if err := d.createRepositoriesFile(lastLayerID); err != nil {
+			return err
+		}
+	}
+
+	repoTags := []string{}
+	for _, tag := range d.repoTags {
+		// For github.com/docker/docker consumers, this works just as well as
+		//   refString := ref.String()
+		// because when reading the RepoTags strings, github.com/docker/docker/reference
+		// normalizes both of them to the same value.
+		//
+		// Doing it this way to include the normalized-out `docker.io[/library]` does make
+		// a difference for github.com/projectatomic/docker consumers, with the
+		// “Add --add-registry and --block-registry options to docker daemon” patch.
+		// These consumers treat reference strings which include a hostname and reference
+		// strings without a hostname differently.
+		//
+		// Using the host name here is more explicit about the intent, and it has the same
+		// effect as (docker pull) in projectatomic/docker, which tags the result using
+		// a hostname-qualified reference.
+ // See https://github.com/containers/image/issues/72 for a more detailed + // analysis and explanation. + refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag()) + repoTags = append(repoTags, refString) + } + + items := []ManifestItem{{ + Config: man.ConfigDescriptor.Digest.Hex() + ".json", + RepoTags: repoTags, + Layers: layerPaths, + Parent: "", + LayerSources: nil, + }} + itemsBytes, err := json.Marshal(&items) + if err != nil { + return err + } + + // FIXME? Do we also need to support the legacy format? + return d.sendBytes(manifestFileName, itemsBytes) +} + +// writeLegacyLayerMetadata writes legacy VERSION and configuration files for all layers +func (d *Destination) writeLegacyLayerMetadata(layerDescriptors []manifest.Schema2Descriptor) (layerPaths []string, lastLayerID string, err error) { + var chainID digest.Digest + lastLayerID = "" + for i, l := range layerDescriptors { + // This chainID value matches the computation in docker/docker/layer.CreateChainID … + if chainID == "" { + chainID = l.Digest + } else { + chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String()) + } + // … but note that this image ID does not match docker/docker/image/v1.CreateID. At least recent + // versions allocate new IDs on load, as long as the IDs we use are unique / cannot loop. + // + // Overall, the goal of computing a digest dependent on the full history is to avoid reusing an image ID + // (and possibly creating a loop in the "parent" links) if a layer with the same DiffID appears two or more + // times in layersDescriptors. The ChainID values are sufficient for this, the v1.CreateID computation + // which also mixes in the full image configuration seems unnecessary, at least as long as we are storing + // only a single image per tarball, i.e. all DiffID prefixes are unique (can’t differ only with + // configuration). + layerID := chainID.Hex() + + physicalLayerPath := l.Digest.Hex() + ".tar" + // The layer itself has been stored into physicalLayerPath in PutManifest. + // So, use that path for layerPaths used in the non-legacy manifest + layerPaths = append(layerPaths, physicalLayerPath) + // ... 
and create a symlink for the legacy format.
+		if err := d.sendSymlink(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
+			return nil, "", errors.Wrap(err, "Error creating layer symbolic link")
+		}
+
+		b := []byte("1.0")
+		if err := d.sendBytes(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
+			return nil, "", errors.Wrap(err, "Error writing VERSION file")
+		}
+
+		// The legacy format requires a config file per layer
+		layerConfig := make(map[string]interface{})
+		layerConfig["id"] = layerID
+
+		// The root layer doesn't have any parent
+		if lastLayerID != "" {
+			layerConfig["parent"] = lastLayerID
+		}
+		// The top layer configuration file is generated by using a subpart of the image configuration
+		if i == len(layerDescriptors)-1 {
+			var config map[string]*json.RawMessage
+			err := json.Unmarshal(d.config, &config)
+			if err != nil {
+				return nil, "", errors.Wrap(err, "Error unmarshaling config")
+			}
+			for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
+				layerConfig[attr] = config[attr]
+			}
+		}
+		b, err := json.Marshal(layerConfig)
+		if err != nil {
+			return nil, "", errors.Wrap(err, "Error marshaling layer config")
+		}
+		if err := d.sendBytes(filepath.Join(layerID, legacyConfigFileName), b); err != nil {
+			return nil, "", errors.Wrap(err, "Error writing config json file")
+		}
+
+		lastLayerID = layerID
+	}
+	return layerPaths, lastLayerID, nil
+}
+
+type tarFI struct {
+	path      string
+	size      int64
+	isSymlink bool
+}
+
+func (t *tarFI) Name() string {
+	return t.path
+}
+func (t *tarFI) Size() int64 {
+	return t.size
+}
+func (t *tarFI) Mode() os.FileMode {
+	if t.isSymlink {
+		return os.ModeSymlink
+	}
+	return 0444
+}
+func (t *tarFI) ModTime() time.Time {
+	return time.Unix(0, 0)
+}
+func (t *tarFI) IsDir() bool {
+	return false
+}
+func (t *tarFI) Sys() interface{} {
+	return nil
+}
+
+// sendSymlink sends a symlink into the tar stream.
+func (d *Destination) sendSymlink(path string, target string) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Sending as tar link %s -> %s", path, target)
+	return d.tar.WriteHeader(hdr)
+}
+
+// sendBytes sends bytes, as a file with the given path, into the tar stream.
+func (d *Destination) sendBytes(path string, b []byte) error {
+	return d.sendFile(path, int64(len(b)), bytes.NewReader(b))
+}
+
+// sendFile sends a file into the tar stream.
+func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Sending as tar file %s", path)
+	if err := d.tar.WriteHeader(hdr); err != nil {
+		return err
+	}
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+	size, err := io.Copy(d.tar, stream)
+	if err != nil {
+		return err
+	}
+	if size != expectedSize {
+		return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+	}
+	return nil
+}
+
+// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
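The chain-ID loop in writeLegacyLayerMetadata above can be read in isolation. A standalone sketch of the same computation; legacyChainIDs is an invented name, and digest is github.com/opencontainers/go-digest:

    // legacyChainIDs mirrors the loop above: each legacy layer ID is the
    // canonical digest of "<previous chain digest> <this layer's digest>".
    func legacyChainIDs(layerDigests []digest.Digest) []string {
        var chain digest.Digest
        ids := make([]string, 0, len(layerDigests))
        for _, d := range layerDigests {
            if chain == "" {
                chain = d
            } else {
                chain = digest.Canonical.FromString(chain.String() + " " + d.String())
            }
            ids = append(ids, chain.Hex())
        }
        return ids
    }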
+func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + if instanceDigest != nil { + return errors.Errorf(`Manifest lists are not supported for docker tar files`) + } + if len(signatures) != 0 { + return errors.Errorf("Storing signatures for docker tar files is not supported") + } + return nil +} + +// Commit finishes writing data to the underlying io.Writer. +// It is the caller's responsibility to close it, if necessary. +func (d *Destination) Commit(ctx context.Context) error { + return d.tar.Close() +} diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/doc.go b/vendor/github.com/containers/image/v5/docker/tarfile/doc.go new file mode 100644 index 000000000..4ea5369c0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/tarfile/doc.go @@ -0,0 +1,3 @@ +// Package tarfile is an internal implementation detail of some transports. +// Do not use outside of the github.com/containers/image repo! +package tarfile diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/tarfile/src.go new file mode 100644 index 000000000..ad0a3d2cb --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/tarfile/src.go @@ -0,0 +1,490 @@ +package tarfile + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "os" + "path" + "sync" + + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Source is a partial implementation of types.ImageSource for reading from tarPath. +type Source struct { + tarPath string + removeTarPathOnClose bool // Remove temp file on close if true + // The following data is only available after ensureCachedDataIsPresent() succeeds + tarManifest *ManifestItem // nil if not available yet. + configBytes []byte + configDigest digest.Digest + orderedDiffIDList []digest.Digest + knownLayers map[digest.Digest]*layerInfo + // Other state + generatedManifest []byte // Private cache for GetManifest(), nil if not set yet. + cacheDataLock sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe + cacheDataResult error // Private state for ensureCachedDataIsPresent +} + +type layerInfo struct { + path string + size int64 +} + +// TODO: We could add support for multiple images in a single archive, so +// that people could use docker-archive:opensuse.tar:opensuse:leap as +// the source of an image. +// To do for both the NewSourceFromFile and NewSourceFromStream functions + +// NewSourceFromFile returns a tarfile.Source for the specified path. +func NewSourceFromFile(path string) (*Source, error) { + file, err := os.Open(path) + if err != nil { + return nil, errors.Wrapf(err, "error opening file %q", path) + } + defer file.Close() + + // If the file is already not compressed we can just return the file itself + // as a source. Otherwise we pass the stream to NewSourceFromStream. 
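compression.AutoDecompress, used throughout this file, returns an uncompressed io.ReadCloser plus a flag saying whether any decompression was applied. A small sketch relying only on that behavior; readMaybeCompressed is an invented name, and os, io/ioutil, and the compression package are assumed imported:

    // readMaybeCompressed returns the decompressed contents of path,
    // whether or not the file on disk is compressed.
    func readMaybeCompressed(path string) ([]byte, error) {
        f, err := os.Open(path)
        if err != nil {
            return nil, err
        }
        defer f.Close()
        rc, _, err := compression.AutoDecompress(f)
        if err != nil {
            return nil, err
        }
        defer rc.Close()
        return ioutil.ReadAll(rc)
    }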
+	stream, isCompressed, err := compression.AutoDecompress(file)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
+	}
+	defer stream.Close()
+	if !isCompressed {
+		return &Source{
+			tarPath: path,
+		}, nil
+	}
+	return NewSourceFromStream(stream)
+}
+
+// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
+// which can be either compressed or uncompressed. The caller can close the
+// inputStream immediately after NewSourceFromStream returns.
+func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
+	// FIXME: use SystemContext here.
+	// Save inputStream to a temporary file
+	tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tar")
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating temporary file")
+	}
+	defer tarCopyFile.Close()
+
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			os.Remove(tarCopyFile.Name())
+		}
+	}()
+
+	// In order to be compatible with docker-load, we need to support
+	// auto-decompression (it's also a nice quality-of-life thing to avoid
+	// giving users really confusing "invalid tar header" errors).
+	uncompressedStream, _, err := compression.AutoDecompress(inputStream)
+	if err != nil {
+		return nil, errors.Wrap(err, "Error auto-decompressing input")
+	}
+	defer uncompressedStream.Close()
+
+	// Copy the plain archive to the temporary file.
+	//
+	// TODO: This can take quite some time, and should ideally be cancellable
+	// using a context.Context.
+	if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
+		return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
+	}
+	succeeded = true
+
+	return &Source{
+		tarPath:              tarCopyFile.Name(),
+		removeTarPathOnClose: true,
+	}, nil
+}
+
+// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
+type tarReadCloser struct {
+	*tar.Reader
+	backingFile *os.File
+}
+
+func (t *tarReadCloser) Close() error {
+	return t.backingFile.Close()
+}
+
+// openTarComponent returns a ReadCloser for the specific file within the archive.
+// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
+// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
+// The caller should call .Close() on the returned stream.
+func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
+	f, err := os.Open(s.tarPath)
+	if err != nil {
+		return nil, err
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			f.Close()
+		}
+	}()
+
+	tarReader, header, err := findTarComponent(f, componentPath)
+	if err != nil {
+		return nil, err
+	}
+	if header == nil {
+		return nil, os.ErrNotExist
+	}
+	if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
+		// We follow only one symlink; so no loops are possible.
+		if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+			return nil, err
+		}
+		// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
+		// so we don't care.
+ tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname)) + if err != nil { + return nil, err + } + if header == nil { + return nil, os.ErrNotExist + } + } + + if !header.FileInfo().Mode().IsRegular() { + return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name) + } + succeeded = true + return &tarReadCloser{Reader: tarReader, backingFile: f}, nil +} + +// findTarComponent returns a header and a reader matching path within inputFile, +// or (nil, nil, nil) if not found. +func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) { + t := tar.NewReader(inputFile) + for { + h, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, nil, err + } + if h.Name == path { + return t, h, nil + } + } + return nil, nil, nil +} + +// readTarComponent returns full contents of componentPath. +func (s *Source) readTarComponent(path string) ([]byte, error) { + file, err := s.openTarComponent(path) + if err != nil { + return nil, errors.Wrapf(err, "Error loading tar component %s", path) + } + defer file.Close() + bytes, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + return bytes, nil +} + +// ensureCachedDataIsPresent loads data necessary for any of the public accessors. +// It is safe to call this from multi-threaded code. +func (s *Source) ensureCachedDataIsPresent() error { + s.cacheDataLock.Do(func() { + s.cacheDataResult = s.ensureCachedDataIsPresentPrivate() + }) + return s.cacheDataResult +} + +// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent. +// Call ensureCachedDataIsPresent instead. +func (s *Source) ensureCachedDataIsPresentPrivate() error { + // Read and parse manifest.json + tarManifest, err := s.loadTarManifest() + if err != nil { + return err + } + + // Check to make sure length is 1 + if len(tarManifest) != 1 { + return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest)) + } + + // Read and parse config. + configBytes, err := s.readTarComponent(tarManifest[0].Config) + if err != nil { + return err + } + var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. + if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { + return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config) + } + + knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig) + if err != nil { + return err + } + + // Success; commit. + s.tarManifest = &tarManifest[0] + s.configBytes = configBytes + s.configDigest = digest.FromBytes(configBytes) + s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs + s.knownLayers = knownLayers + return nil +} + +// loadTarManifest loads and decodes the manifest.json. +func (s *Source) loadTarManifest() ([]ManifestItem, error) { + // FIXME? Do we need to deal with the legacy format? + bytes, err := s.readTarComponent(manifestFileName) + if err != nil { + return nil, err + } + var items []ManifestItem + if err := json.Unmarshal(bytes, &items); err != nil { + return nil, errors.Wrap(err, "Error decoding tar manifest.json") + } + return items, nil +} + +// Close removes resources associated with an initialized Source, if any. 
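ensureCachedDataIsPresent above uses sync.Once to memoize both the parsed data and the resulting error, which is what makes it safe for concurrent callers. The same shape in isolation; onceLoader is an invented name, assuming only the standard library sync package:

    // onceLoader caches a single load result, errors included, so concurrent
    // callers all observe the same outcome.
    type onceLoader struct {
        once sync.Once
        data []byte
        err  error
    }

    func (l *onceLoader) load(fill func() ([]byte, error)) ([]byte, error) {
        l.once.Do(func() {
            l.data, l.err = fill()
        })
        return l.data, l.err
    }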
+func (s *Source) Close() error { + if s.removeTarPathOnClose { + return os.Remove(s.tarPath) + } + return nil +} + +// LoadTarManifest loads and decodes the manifest.json +func (s *Source) LoadTarManifest() ([]ManifestItem, error) { + return s.loadTarManifest() +} + +func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { + // Collect layer data available in manifest and config. + if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { + return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) + } + knownLayers := map[digest.Digest]*layerInfo{} + unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. + for i, diffID := range parsedConfig.RootFS.DiffIDs { + if _, ok := knownLayers[diffID]; ok { + // Apparently it really can happen that a single image contains the same layer diff more than once. + // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter + // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original. + continue + } + layerPath := tarManifest.Layers[i] + if _, ok := unknownLayerSizes[layerPath]; ok { + return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) + } + li := &layerInfo{ // A new element in each iteration + path: layerPath, + size: -1, + } + knownLayers[diffID] = li + unknownLayerSizes[layerPath] = li + } + + // Scan the tar file to collect layer sizes. + file, err := os.Open(s.tarPath) + if err != nil { + return nil, err + } + defer file.Close() + t := tar.NewReader(file) + for { + h, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + if li, ok := unknownLayerSizes[h.Name]; ok { + // Since GetBlob will decompress layers that are compressed we need + // to do the decompression here as well, otherwise we will + // incorrectly report the size. Pretty critical, since tools like + // umoci always compress layer blobs. Obviously we only bother with + // the slower method of checking if it's compressed. + uncompressedStream, isCompressed, err := compression.AutoDecompress(t) + if err != nil { + return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name) + } + defer uncompressedStream.Close() + + uncompressedSize := h.Size + if isCompressed { + uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream) + if err != nil { + return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name) + } + } + li.size = uncompressedSize + delete(unknownLayerSizes, h.Name) + } + } + if len(unknownLayerSizes) != 0 { + return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. + } + + return knownLayers, nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be no secondary instances. +func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. + return nil, "", errors.New(`Manifest lists are not supported by "docker-daemon:"`) + } + if s.generatedManifest == nil { + if err := s.ensureCachedDataIsPresent(); err != nil { + return nil, "", err + } + m := manifest.Schema2{ + SchemaVersion: 2, + MediaType: manifest.DockerV2Schema2MediaType, + ConfigDescriptor: manifest.Schema2Descriptor{ + MediaType: manifest.DockerV2Schema2ConfigMediaType, + Size: int64(len(s.configBytes)), + Digest: s.configDigest, + }, + LayersDescriptors: []manifest.Schema2Descriptor{}, + } + for _, diffID := range s.orderedDiffIDList { + li, ok := s.knownLayers[diffID] + if !ok { + return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) + } + m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ + Digest: diffID, // diffID is a digest of the uncompressed tarball + MediaType: manifest.DockerV2Schema2LayerMediaType, + Size: li.size, + }) + } + manifestBytes, err := json.Marshal(&m) + if err != nil { + return nil, "", err + } + s.generatedManifest = manifestBytes + } + return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil +} + +// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input. +type uncompressedReadCloser struct { + io.Reader + underlyingCloser func() error + uncompressedCloser func() error +} + +func (r uncompressedReadCloser) Close() error { + var res error + if err := r.uncompressedCloser(); err != nil { + res = err + } + if err := r.underlyingCloser(); err != nil && res == nil { + res = err + } + return res +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *Source) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if err := s.ensureCachedDataIsPresent(); err != nil { + return nil, 0, err + } + + if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. + return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil + } + + if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, + underlyingStream, err := s.openTarComponent(li.path) + if err != nil { + return nil, 0, err + } + closeUnderlyingStream := true + defer func() { + if closeUnderlyingStream { + underlyingStream.Close() + } + }() + + // In order to handle the fact that digests != diffIDs (and thus that a + // caller which is trying to verify the blob will run into problems), + // we need to decompress blobs. 
This is a bit ugly, but it's a
+		// consequence of making everything addressable by their DiffID rather
+		// than by their digest...
+		//
+		// In particular, because the v2s2 manifest being generated uses
+		// DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
+		// layers, not their _actual_ digest. The result is that copy/... will
+		// be verifying a "digest" which is not the actual layer's digest (but
+		// is instead the DiffID).
+
+		uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
+		if err != nil {
+			return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest)
+		}
+
+		newStream := uncompressedReadCloser{
+			Reader:             uncompressedStream,
+			underlyingCloser:   underlyingStream.Close,
+			uncompressedCloser: uncompressedStream.Close,
+		}
+		closeUnderlyingStream = false
+
+		return newStream, li.size, nil
+	}
+
+	return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as there can be no secondary manifests.
+func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	if instanceDigest != nil {
+		// How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
+		return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
+	}
+	return [][]byte{}, nil
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest cannot be a list, so there can be no secondary manifests.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/types.go b/vendor/github.com/containers/image/v5/docker/tarfile/types.go
new file mode 100644
index 000000000..ac222528a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/tarfile/types.go
@@ -0,0 +1,28 @@
+package tarfile
+
+import (
+	"github.com/containers/image/v5/manifest"
+	"github.com/opencontainers/go-digest"
+)
+
+// Various data structures.
+
+// Based on github.com/docker/docker/image/tarexport/tarexport.go
+const (
+	manifestFileName           = "manifest.json"
+	legacyLayerFileName        = "layer.tar"
+	legacyConfigFileName       = "json"
+	legacyVersionFileName      = "VERSION"
+	legacyRepositoriesFileName = "repositories"
+)
+
+// ManifestItem is an element of the array stored in the top-level manifest.json file.
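The ManifestItem struct below models entries of that top-level manifest.json; as src.go enforces, this transport expects exactly one entry. An illustrative document and decode, with invented file names and tag, assuming encoding/json:

    const sampleManifestJSON = `[{
        "Config": "deadbeef.json",
        "RepoTags": ["docker.io/library/alpine:3.10"],
        "Layers": ["cafebabe.tar"]
    }]`

    func decodeSampleManifest() ([]ManifestItem, error) {
        var items []ManifestItem // one element per image in the archive
        err := json.Unmarshal([]byte(sampleManifestJSON), &items)
        return items, err
    }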
+type ManifestItem struct {
+	Config       string
+	RepoTags     []string
+	Layers       []string
+	Parent       imageID                                      `json:",omitempty"`
+	LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"`
+}
+
+type imageID string
diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
new file mode 100644
index 000000000..23664a74a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
@@ -0,0 +1,159 @@
+package docker
+
+// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
+
+import (
+	"net/http"
+	"strings"
+)
+
+// challenge carries information from a WWW-Authenticate response header.
+// See RFC 7235.
+type challenge struct {
+	// Scheme is the auth-scheme according to RFC 7235
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 7235
+	Parameters map[string]string
+}
+
+// Octet types from RFC 7230.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+func parseAuthHeader(header http.Header) []challenge {
+	challenges := []challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		if v != "" {
+			challenges = append(challenges, challenge{Scheme: v, Parameters: p})
+		}
+	}
+	return challenges
+}
+
+// NOTE: This is not a fully compliant parser per RFC 7235:
+// Most notably it does not support more than one challenge within a single header
+// Some of the whitespace parsing also seems noncompliant.
+// But it is clearly better than what we used to have… +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/vendor/github.com/containers/image/v5/image/docker_list.go b/vendor/github.com/containers/image/v5/image/docker_list.go new file mode 100644 index 000000000..651c301aa --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/docker_list.go @@ -0,0 +1,34 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { + list, err := manifest.Schema2ListFromManifest(manblob) + if err != nil { + return nil, errors.Wrapf(err, "Error parsing schema2 manifest list") + } + targetManifestDigest, err := list.ChooseInstance(sys) + if err != nil { + return nil, errors.Wrapf(err, "Error choosing image instance") + } + manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) + if err != nil { + return nil, errors.Wrapf(err, "Error loading manifest for target platform") + } + + matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) + if err != nil { + return nil, errors.Wrap(err, "Error computing manifest digest") + } + if !matches { + return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) + } + + return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) +} diff --git a/vendor/github.com/containers/image/v5/image/docker_schema1.go b/vendor/github.com/containers/image/v5/image/docker_schema1.go new file mode 100644 index 000000000..1a1c39d55 --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/docker_schema1.go @@ -0,0 +1,202 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + 
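A usage sketch for the challenge parser in wwwauthenticate.go above, fed a typical (invented) registry response header; printChallenges is a hypothetical name, and fmt and net/http are assumed in addition to the parser:

    func printChallenges() {
        hdr := http.Header{}
        hdr.Add("WWW-Authenticate", `Bearer realm="https://auth.docker.io/token",service="registry.docker.io"`)
        for _, c := range parseAuthHeader(hdr) {
            // Scheme and parameter keys are lower-cased by the parser, e.g. "bearer".
            fmt.Printf("scheme=%s realm=%s service=%s\n",
                c.Scheme, c.Parameters["realm"], c.Parameters["service"])
        }
    }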
+type manifestSchema1 struct {
+	m *manifest.Schema1
+}
+
+func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) {
+	m, err := manifest.Schema1FromManifest(manifestBlob)
+	if err != nil {
+		return nil, err
+	}
+	return &manifestSchema1{m: m}, nil
+}
+
+// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
+func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) {
+	m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture)
+	if err != nil {
+		return nil, err
+	}
+	return &manifestSchema1{m: m}, nil
+}
+
+func (m *manifestSchema1) serialize() ([]byte, error) {
+	return m.m.Serialize()
+}
+
+func (m *manifestSchema1) manifestMIMEType() string {
+	return manifest.DockerV2Schema1SignedMediaType
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
+	return m.m.ConfigInfo()
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) {
+	return nil, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+	v2s2, err := m.convertToManifestSchema2(nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	return v2s2.OCIConfig(ctx)
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
+	return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+	// This is a bit convoluted: We can’t just have a "get embedded docker reference" method
+	// and have the “does it conflict” logic in the generic copy code, because the manifest does not actually
+	// embed a full docker/distribution reference, but only the repo name and tag (without the host name).
+	// So we would have to provide a “return repo without host name, and tag” getter for the generic code,
+	// which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the
+	// generic copy code needs to know about is reference.Named and that a manifest may need updating
+	// for some destinations.
+	name := reference.Path(ref)
+	var tag string
+	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+		tag = tagged.Tag()
+	} else {
+		tag = ""
+	}
+	return m.m.Name != name || m.m.Tag != tag
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) {
+	return m.m.Inspect(nil)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+	return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest)
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+	copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}
+	if options.LayerInfos != nil {
+		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+			return nil, err
+		}
+	}
+	if options.EmbeddedDockerReference != nil {
+		copy.m.Name = reference.Path(options.EmbeddedDockerReference)
+		if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
+			copy.m.Tag = tagged.Tag()
+		} else {
+			copy.m.Tag = ""
+		}
+	}
+
+	switch options.ManifestMIMEType {
+	case "": // No conversion, OK
+	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+		// We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type
+		// will be rejected if there isn’t a signature); so, handle conversions between them by doing nothing.
+	case manifest.DockerV2Schema2MediaType:
+		m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
+		if err != nil {
+			return nil, err
+		}
+		return memoryImageFromManifest(m2), nil
+	case imgspecv1.MediaTypeImageManifest:
+		// We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest
+		m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
+		if err != nil {
+			return nil, err
+		}
+		return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{
+			ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+			InformationOnly:  options.InformationOnly,
+		})
+	default:
+		return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
+	}
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+// Based on github.com/docker/docker/distribution/pull_v2.go
+func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) {
+	if len(m.m.ExtractedV1Compatibility) == 0 {
+		// What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing.
+ return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) + } + if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { + return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) + } + if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) + } + if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) + } + + // Build a list of the diffIDs for the non-empty layers. + diffIDs := []digest.Digest{} + var layers []manifest.Schema2Descriptor + for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index + + if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { + var size int64 + if uploadedLayerInfos != nil { + size = uploadedLayerInfos[v2Index].Size + } + var d digest.Digest + if layerDiffIDs != nil { + d = layerDiffIDs[v2Index] + } + layers = append(layers, manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Size: size, + Digest: m.m.FSLayers[v1Index].BlobSum, + }) + diffIDs = append(diffIDs, d) + } + } + configJSON, err := m.m.ToSchema2Config(diffIDs) + if err != nil { + return nil, err + } + configDescriptor := manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.container.image.v1+json", + Size: int64(len(configJSON)), + Digest: digest.FromBytes(configJSON), + } + + return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil +} diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go new file mode 100644 index 000000000..254c13f78 --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go @@ -0,0 +1,357 @@ +package image + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) +// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is +// a non-zero embedded timestamp; we could zero that, but that would just waste storage space +// in registries, so let’s use the same values. +var GzippedEmptyLayer = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer +const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +type manifestSchema2 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. 
+ m *manifest.Schema2 +} + +func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema2FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema2{ + src: src, + m: m, + }, nil +} + +// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: +func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest { + return &manifestSchema2{ + src: src, + configBlob: configBlob, + m: manifest.Schema2FromComponents(config, layers), + } +} + +func (m *manifestSchema2) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema2) manifestMIMEType() string { + return m.m.MediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema2) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + configBlob, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields + // than OCI v1. This unmarshal makes sure we drop docker v2s2 + // fields that aren't needed in OCI v1. + configOCI := &imgspecv1.Image{} + if err := json.Unmarshal(configBlob, configOCI); err != nil { + return nil, err + } + return configOCI, nil +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { + if m.configBlob == nil { + if m.src == nil { + return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") + } + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) + if err != nil { + return nil, err + } + defer stream.Close() + blob, err := ioutil.ReadAll(stream) + if err != nil { + return nil, err + } + computedDigest := digest.FromBytes(blob) + if computedDigest != m.m.ConfigDescriptor.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) + } + m.configBlob = blob + } + return m.configBlob, nil +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema2) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. 
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) +func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { + return false +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { + getter := func(info types.BlobInfo) ([]byte, error) { + if info.Digest != m.ConfigInfo().Digest { + // Shouldn't ever happen + return nil, errors.New("asked for a different config blob") + } + config, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + return config, nil + } + return m.m.Inspect(getter) +} + +// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. +// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute +// (most importantly it forces us to download the full layers even if they are already present at the destination). +func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { + return false +} + +// UpdatedImage returns a types.Image modified according to options. +// This does not change the state of the original Image object. +func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { + copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. + src: m.src, + configBlob: m.configBlob, + m: manifest.Schema2Clone(m.m), + } + if options.LayerInfos != nil { + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err + } + } + // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. 
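The switch below is what a caller reaches through types.Image.UpdatedImage; requesting a conversion looks roughly like this sketch (toOCI is an invented name, and schema1 targets may additionally require options.InformationOnly to be filled in):

    // toOCI asks an image for a copy of itself converted to the OCI manifest format.
    func toOCI(ctx context.Context, img types.Image) (types.Image, error) {
        return img.UpdatedImage(ctx, types.ManifestUpdateOptions{
            ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
        })
    }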
+
+	switch options.ManifestMIMEType {
+	case "": // No conversion, OK
+	case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType:
+		return copy.convertToManifestSchema1(ctx, options.InformationOnly.Destination)
+	case imgspecv1.MediaTypeImageManifest:
+		return copy.convertToManifestOCI1(ctx)
+	default:
+		return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType)
+	}
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
+	return imgspecv1.Descriptor{
+		MediaType: d.MediaType,
+		Size:      d.Size,
+		Digest:    d.Digest,
+		URLs:      d.URLs,
+	}
+}
+
+func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context) (types.Image, error) {
+	configOCI, err := m.OCIConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	configOCIBytes, err := json.Marshal(configOCI)
+	if err != nil {
+		return nil, err
+	}
+
+	config := imgspecv1.Descriptor{
+		MediaType: imgspecv1.MediaTypeImageConfig,
+		Size:      int64(len(configOCIBytes)),
+		Digest:    digest.FromBytes(configOCIBytes),
+	}
+
+	layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
+	for idx := range layers {
+		layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
+		switch m.m.LayersDescriptors[idx].MediaType {
+		case manifest.DockerV2Schema2ForeignLayerMediaType:
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+		case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
+		case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
+		case manifest.DockerV2Schema2LayerMediaType:
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
+		default:
+			return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
+		}
+	}
+
+	m1 := manifestOCI1FromComponents(config, m.src, configOCIBytes, layers)
+	return memoryImageFromManifest(m1), nil
+}
+
+// Based on docker/distribution/manifest/schema1/config_builder.go
+func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest types.ImageDestination) (types.Image, error) {
+	configBytes, err := m.ConfigBlob(ctx)
+	if err != nil {
+		return nil, err
+	}
+	imageConfig := &manifest.Schema2Image{}
+	if err := json.Unmarshal(configBytes, imageConfig); err != nil {
+		return nil, err
+	}
+
+	// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
+	fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
+	history := make([]manifest.Schema1History, len(imageConfig.History))
+	nonemptyLayerIndex := 0
+	var parentV1ID string // Set in the loop
+	v1ID := ""
+	haveGzippedEmptyLayer := false
+	if len(imageConfig.History) == 0 {
+		// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
+ return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) + } + for v2Index, historyEntry := range imageConfig.History { + parentV1ID = v1ID + v1Index := len(imageConfig.History) - 1 - v2Index + + var blobDigest digest.Digest + if historyEntry.EmptyLayer { + if !haveGzippedEmptyLayer { + logrus.Debugf("Uploading empty layer during conversion to schema 1") + // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, + // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false) + if err != nil { + return nil, errors.Wrap(err, "Error uploading empty layer") + } + if info.Digest != GzippedEmptyLayerDigest { + return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, GzippedEmptyLayerDigest) + } + haveGzippedEmptyLayer = true + } + blobDigest = GzippedEmptyLayerDigest + } else { + if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { + return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) + } + blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest + nonemptyLayerIndex++ + } + + // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. + v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) + if err != nil { + return nil, err + } + v1ID = v + + fakeImage := manifest.Schema1V1Compatibility{ + ID: v1ID, + Parent: parentV1ID, + Comment: historyEntry.Comment, + Created: historyEntry.Created, + Author: historyEntry.Author, + ThrowAway: historyEntry.EmptyLayer, + } + fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} + v1CompatibilityBytes, err := json.Marshal(&fakeImage) + if err != nil { + return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) + } + + fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} + history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} + // Note that parentV1ID of the top layer is preserved when exiting this loop + } + + // Now patch in real configuration for the top layer (v1Index == 0) + v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. + if err != nil { + return nil, err + } + v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) + if err != nil { + return nil, err + } + history[0].V1Compatibility = string(v1Config) + + m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) + if err != nil { + return nil, err // This should never happen, we should have created all the components correctly. + } + return memoryImageFromManifest(m1), nil +} + +func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { + if err := blobDigest.Validate(); err != nil { + return "", err + } + parts := append([]string{blobDigest.Hex()}, others...) 
+	v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
+	return hex.EncodeToString(v1IDHash[:]), nil
+}
+
+func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
+	// Preserve everything we don't specifically know about.
+	// (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
+	rawContents := map[string]*json.RawMessage{}
+	if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
+		return nil, err
+	}
+	delete(rawContents, "rootfs")
+	delete(rawContents, "history")
+
+	updates := map[string]interface{}{"id": v1ID}
+	if parentV1ID != "" {
+		updates["parent"] = parentV1ID
+	}
+	if throwaway {
+		updates["throwaway"] = throwaway
+	}
+	for field, value := range updates {
+		encoded, err := json.Marshal(value)
+		if err != nil {
+			return nil, err
+		}
+		rawContents[field] = (*json.RawMessage)(&encoded)
+	}
+	return json.Marshal(rawContents)
+}
diff --git a/vendor/github.com/containers/image/v5/image/manifest.go b/vendor/github.com/containers/image/v5/image/manifest.go
new file mode 100644
index 000000000..fe66da157
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/manifest.go
@@ -0,0 +1,75 @@
+package image
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// genericManifest is an interface for parsing, modifying image manifests and related data.
+// Note that the public methods are intended to be a subset of types.Image
+// so that embedding a genericManifest into structs works.
+// will support v1 one day...
+type genericManifest interface {
+	serialize() ([]byte, error)
+	manifestMIMEType() string
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+	ConfigInfo() types.BlobInfo
+	// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+	// The result is cached; it is OK to call this however often you need.
+	ConfigBlob(context.Context) ([]byte, error)
+	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+	// old image manifests work (docker v2s1 especially).
+	OCIConfig(context.Context) (*imgspecv1.Image, error)
+	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// The Digest field is guaranteed to be provided; Size may be -1.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() []types.BlobInfo
+	// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+	// It returns false if the manifest does not embed a Docker reference.
+	// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+	EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+	Inspect(context.Context) (*types.ImageInspectInfo, error)
+	// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+	// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+	// (most importantly it forces us to download the full layers even if they are already present at the destination).
+	UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool
+	// UpdatedImage returns a types.Image modified according to options.
+	// This does not change the state of the original Image object.
+	UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error)
+}
+
+// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
+// If manblob is a manifest list, it implicitly chooses an appropriate image from the list.
+func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
+	switch manifest.NormalizedMIMEType(mt) {
+	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+		return manifestSchema1FromManifest(manblob)
+	case imgspecv1.MediaTypeImageManifest:
+		return manifestOCI1FromManifest(src, manblob)
+	case manifest.DockerV2Schema2MediaType:
+		return manifestSchema2FromManifest(src, manblob)
+	case manifest.DockerV2ListMediaType:
+		return manifestSchema2FromManifestList(ctx, sys, src, manblob)
+	case imgspecv1.MediaTypeImageIndex:
+		return manifestOCI1FromImageIndex(ctx, sys, src, manblob)
+	default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
+		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
+	}
+}
+
+// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo.
+func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo {
+	blobs := make([]types.BlobInfo, len(layers))
+	for i, layer := range layers {
+		blobs[i] = layer.BlobInfo
+	}
+	return blobs
+}
diff --git a/vendor/github.com/containers/image/v5/image/memory.go b/vendor/github.com/containers/image/v5/image/memory.go
new file mode 100644
index 000000000..4c96b37d8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/memory.go
@@ -0,0 +1,64 @@
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+// memoryImage is a mostly-implementation of types.Image assembled from data
+// created in memory, used primarily as a return value of types.Image.UpdatedImage
+// as a way to carry various structured information in a type-safe and easy-to-use way.
+// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone
+// collection of all related information, e.g. there is no way to get layer blobs
+// from a memoryImage.
+type memoryImage struct {
+	genericManifest
+	serializedManifest []byte // A private cache for Manifest()
+}
+
+func memoryImageFromManifest(m genericManifest) types.Image {
+	return &memoryImage{
+		genericManifest:    m,
+		serializedManifest: nil,
+	}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *memoryImage) Reference() types.ImageReference {
+	// It would really be inappropriate to return the ImageReference of the image this was based on.
+	return nil
+}
+
+// Size returns the size of the image as stored, if known, or -1 if not.
+func (i *memoryImage) Size() (int64, error) {
+	return -1, nil
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) {
+	if i.serializedManifest == nil {
+		m, err := i.genericManifest.serialize()
+		if err != nil {
+			return nil, "", err
+		}
+		i.serializedManifest = m
+	}
+	return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) {
+	// Modifying an image invalidates signatures; a caller asking the updated image for signatures
+	// is probably confused.
+	return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory")
+}
+
+// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go
new file mode 100644
index 000000000..18a38d463
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/oci.go
@@ -0,0 +1,214 @@
+package image
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/blobinfocache/none"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type manifestOCI1 struct {
+	src        types.ImageSource // May be nil if configBlob is not nil
+	configBlob []byte            // If set, corresponds to contents of m.Config.
+	m          *manifest.OCI1
+}
+
+func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+	m, err := manifest.OCI1FromManifest(manifestBlob)
+	if err != nil {
+		return nil, err
+	}
+	return &manifestOCI1{
+		src: src,
+		m:   m,
+	}, nil
+}
+
+// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
+func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
+	return &manifestOCI1{
+		src:        src,
+		configBlob: configBlob,
+		m:          manifest.OCI1FromComponents(config, layers),
+	}
+}
+
+func (m *manifestOCI1) serialize() ([]byte, error) {
+	return m.m.Serialize()
+}
+
+func (m *manifestOCI1) manifestMIMEType() string {
+	return imgspecv1.MediaTypeImageManifest
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
+	return m.m.ConfigInfo()
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
+	if m.configBlob == nil {
+		if m.src == nil {
+			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
+		}
+		stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
+		if err != nil {
+			return nil, err
+		}
+		defer stream.Close()
+		blob, err := ioutil.ReadAll(stream)
+		if err != nil {
+			return nil, err
+		}
+		computedDigest := digest.FromBytes(blob)
+		if computedDigest != m.m.Config.Digest {
+			return nil, errors.Errorf("Downloaded config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
+		}
+		m.configBlob = blob
+	}
+	return m.configBlob, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+	cb, err := m.ConfigBlob(ctx)
+	if err != nil {
+		return nil, err
+	}
+	configOCI := &imgspecv1.Image{}
+	if err := json.Unmarshal(cb, configOCI); err != nil {
+		return nil, err
+	}
+	return configOCI, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
+	return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+	return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+	getter := func(info types.BlobInfo) ([]byte, error) {
+		if info.Digest != m.ConfigInfo().Digest {
+			// Shouldn't ever happen
+			return nil, errors.New("asked for a different config blob")
+		}
+		config, err := m.ConfigBlob(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	}
+	return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+	return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+	copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
+		src:        m.src,
+		configBlob: m.configBlob,
+		m:          manifest.OCI1Clone(m.m),
+	}
+	if options.LayerInfos != nil {
+		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+			return nil, err
+		}
+	}
+	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
+
+	switch options.ManifestMIMEType {
+	case "": // No conversion, OK
+	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+		// We can't directly convert to V1, but we can transitively convert via a V2 image
+		m2, err := copy.convertToManifestSchema2()
+		if err != nil {
+			return nil, err
+		}
+		return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{
+			ManifestMIMEType: options.ManifestMIMEType,
+			InformationOnly:  options.InformationOnly,
+		})
+	case manifest.DockerV2Schema2MediaType:
+		return copy.convertToManifestSchema2()
+	default:
+		return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType)
+	}
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
+	return manifest.Schema2Descriptor{
+		MediaType: d.MediaType,
+		Size:      d.Size,
+		Digest:    d.Digest,
+		URLs:      d.URLs,
+	}
+}
+
+func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
+	// Create a copy of the descriptor.
+	config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
+
+	// The only difference between OCI and DockerSchema2 is the mediatypes. The
+	// media type of the manifest is handled by manifestSchema2FromComponents.
+	config.MediaType = manifest.DockerV2Schema2ConfigMediaType
+
+	layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
+	for idx := range layers {
+		layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
+		switch layers[idx].MediaType {
+		case imgspecv1.MediaTypeImageLayerNonDistributable:
+			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
+		case imgspecv1.MediaTypeImageLayerNonDistributableGzip:
+			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
+		case imgspecv1.MediaTypeImageLayerNonDistributableZstd:
+			return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+		case imgspecv1.MediaTypeImageLayer:
+			layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+		case imgspecv1.MediaTypeImageLayerGzip:
+			layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
+		case imgspecv1.MediaTypeImageLayerZstd:
+			return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+		default:
+			return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
+		}
+	}
+
+	// Rather than copying the ConfigBlob now, we just pass m.src to the
+	// translated manifest; since the only difference is the mediatype of
+	// descriptors, there is no change to any blob stored in m.src.
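The switch above translates OCI layer media types to their docker schema2 equivalents one case at a time; the same relation reads naturally as a lookup table. A sketch (zstd layers are deliberately absent because, as the switch shows, schema2 has no media type for them):

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// ociToSchema2LayerType is a table form of the switch above.
var ociToSchema2LayerType = map[string]string{
	imgspecv1.MediaTypeImageLayer:                     manifest.DockerV2SchemaLayerMediaTypeUncompressed,
	imgspecv1.MediaTypeImageLayerGzip:                 manifest.DockerV2Schema2LayerMediaType,
	imgspecv1.MediaTypeImageLayerNonDistributable:     manifest.DockerV2Schema2ForeignLayerMediaType,
	imgspecv1.MediaTypeImageLayerNonDistributableGzip: manifest.DockerV2Schema2ForeignLayerMediaTypeGzip,
}

func main() {
	mt, ok := ociToSchema2LayerType[imgspecv1.MediaTypeImageLayerGzip]
	fmt.Println(mt, ok) // the schema2 gzip layer type, true
}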
+	m1 := manifestSchema2FromComponents(config, m.src, nil, layers)
+	return memoryImageFromManifest(m1), nil
+}
diff --git a/vendor/github.com/containers/image/v5/image/oci_index.go b/vendor/github.com/containers/image/v5/image/oci_index.go
new file mode 100644
index 000000000..022e03aca
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/oci_index.go
@@ -0,0 +1,34 @@
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
+	index, err := manifest.OCI1IndexFromManifest(manblob)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error parsing OCI1 index")
+	}
+	targetManifestDigest, err := index.ChooseInstance(sys)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error choosing image instance")
+	}
+	manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error loading manifest for target platform")
+	}
+
+	matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+	if err != nil {
+		return nil, errors.Wrap(err, "Error computing manifest digest")
+	}
+	if !matches {
+		return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+	}
+
+	return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
+}
diff --git a/vendor/github.com/containers/image/v5/image/sourced.go b/vendor/github.com/containers/image/v5/image/sourced.go
new file mode 100644
index 000000000..3a016e1d0
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/sourced.go
@@ -0,0 +1,104 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/types"
+)
+
+// imageCloser implements types.ImageCloser, perhaps allowing simple users
+// to use a single object without having to keep a reference to a types.ImageSource
+// only to call types.ImageSource.Close().
+type imageCloser struct {
+	types.Image
+	src types.ImageSource
+}
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to
+// keep the Image.)
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { + img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil)) + if err != nil { + return nil, err + } + return &imageCloser{ + Image: img, + src: src, + }, nil +} + +func (ic *imageCloser) Close() error { + return ic.src.Close() +} + +// sourcedImage is a general set of utilities for working with container images, +// whatever is their underlying location (i.e. dockerImageSource-independent). +// Note the existence of skopeo/docker.Image: some instances of a `types.Image` +// may not be a `sourcedImage` directly. However, most users of `types.Image` +// do not care, and those who care about `skopeo/docker.Image` know they do. +type sourcedImage struct { + *UnparsedImage + manifestBlob []byte + manifestMIMEType string + // genericManifest contains data corresponding to manifestBlob. + // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest + // if you want to preserve the original manifest; use manifestBlob directly. + genericManifest +} + +// FromUnparsedImage returns a types.Image implementation for unparsed. +// If unparsed represents a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate single image. +// +// The Image must not be used after the underlying ImageSource is Close()d. +func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { + // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: + // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, + // this is the only UnparsedImage implementation around, anyway. + + // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). + manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) + if err != nil { + return nil, err + } + + parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) + if err != nil { + return nil, err + } + + return &sourcedImage{ + UnparsedImage: unparsed, + manifestBlob: manifestBlob, + manifestMIMEType: manifestMIMEType, + genericManifest: parsedManifest, + }, nil +} + +// Size returns the size of the image as stored, if it's known, or -1 if it isn't. +func (i *sourcedImage) Size() (int64, error) { + return -1, nil +} + +// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. 
+func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+	return i.manifestBlob, i.manifestMIMEType, nil
+}
+
+func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
+}
diff --git a/vendor/github.com/containers/image/v5/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go
new file mode 100644
index 000000000..4e3028d85
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/unparsed.go
@@ -0,0 +1,95 @@
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// UnparsedImage implements types.UnparsedImage.
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+type UnparsedImage struct {
+	src            types.ImageSource
+	instanceDigest *digest.Digest
+	cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
+	// A private cache for Manifest(), may be the empty string if guessing failed.
+	// Valid iff cachedManifest is not nil.
+	cachedManifestMIMEType string
+	cachedSignatures       [][]byte // A private cache for Signatures(); nil if not yet known.
+}
+
+// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
+	return &UnparsedImage{
+		src:            src,
+		instanceDigest: instanceDigest,
+	}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *UnparsedImage) Reference() types.ImageReference {
+	// Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
+	return i.src.Reference()
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+	if i.cachedManifest == nil {
+		m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
+		if err != nil {
+			return nil, "", err
+		}
+
+		// ImageSource.GetManifest does not do digest verification, but we do;
+		// this immediately protects also any user of types.Image.
+		if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
+			matches, err := manifest.MatchesDigest(m, digest)
+			if err != nil {
+				return nil, "", errors.Wrap(err, "Error computing manifest digest")
+			}
+			if !matches {
+				return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
+			}
+		}
+
+		i.cachedManifest = m
+		i.cachedManifestMIMEType = mt
+	}
+	return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known.
+// The bool return value seems redundant with digest != ""; it is used explicitly +// to refuse (unexpected) situations when the digest exists but is "". +func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { + if i.instanceDigest != nil { + return *i.instanceDigest, true + } + ref := i.Reference().DockerReference() + if ref != nil { + if canonical, ok := ref.(reference.Canonical); ok { + return canonical.Digest(), true + } + } + return "", false +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. +func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { + if i.cachedSignatures == nil { + sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) + if err != nil { + return nil, err + } + i.cachedSignatures = sigs + } + return i.cachedSignatures, nil +} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go new file mode 100644 index 000000000..88e123cdd --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go @@ -0,0 +1,73 @@ +// Copyright 2015 Jesse Sipprell. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package keyctl + +import ( + "golang.org/x/sys/unix" +) + +// Key represents a single key linked to one or more kernel keyrings. +type Key struct { + Name string + + id, ring keyID + size int +} + +// ID returns the 32-bit kernel identifier for a specific key +func (k *Key) ID() int32 { + return int32(k.id) +} + +// Get the key's value as a byte slice +func (k *Key) Get() ([]byte, error) { + var ( + b []byte + err error + sizeRead int + ) + + if k.size == 0 { + k.size = 512 + } + + size := k.size + + b = make([]byte, int(size)) + sizeRead = size + 1 + for sizeRead > size { + r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size) + if err != nil { + return nil, err + } + + if sizeRead = int(r1); sizeRead > size { + b = make([]byte, sizeRead) + size = sizeRead + sizeRead = size + 1 + } else { + k.size = sizeRead + } + } + return b[:k.size], err +} + +// Unlink a key from the keyring it was loaded from (or added to). If the key +// is not linked to any other keyrings, it is destroyed. +func (k *Key) Unlink() error { + _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0) + return err +} + +// Describe returns a string describing the attributes of a specified key +func (k *Key) Describe() (string, error) { + keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id)) + if err != nil { + return "", err + } + return keyAttr, nil +} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go new file mode 100644 index 000000000..4bf170156 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go @@ -0,0 +1,120 @@ +// Copyright 2015 Jesse Sipprell. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface) +// +// Deprecated: Most callers should use either golang.org/x/sys/unix directly, +// or the original (and more extensive) github.com/jsipprell/keyctl . 
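Together with the Keyring interface defined in keyring.go below, the Key type above supports a small add/search/read workflow. A sketch of typical use; note that this package is internal to containers/image, so code outside that module would use golang.org/x/sys/unix or github.com/jsipprell/keyctl instead, as the deprecation note says:

// +build linux

package main

import (
	"fmt"

	"github.com/containers/image/v5/internal/pkg/keyctl"
)

func main() {
	ring, err := keyctl.SessionKeyring()
	if err != nil {
		panic(err)
	}
	// "example-name"/"example-secret" are placeholders.
	if _, err := ring.Add("example-name", []byte("example-secret")); err != nil {
		panic(err)
	}
	key, err := ring.Search("example-name")
	if err != nil {
		panic(err)
	}
	payload, err := key.Get()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", payload)
	_ = key.Unlink() // destroyed again if nothing else links it
}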
+package keyctl
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// Keyring is the basic interface to a linux keyctl keyring.
+type Keyring interface {
+	ID
+	Add(string, []byte) (*Key, error)
+	Search(string) (*Key, error)
+}
+
+type keyring struct {
+	id keyID
+}
+
+// ID is the unique 32-bit serial number identifier that all Keys and Keyrings have.
+type ID interface {
+	ID() int32
+}
+
+// Add a new key to a keyring. The key can be searched for later by name.
+func (kr *keyring) Add(name string, key []byte) (*Key, error) {
+	r, err := unix.AddKey("user", name, key, int(kr.id))
+	if err == nil {
+		key := &Key{Name: name, id: keyID(r), ring: kr.id}
+		return key, nil
+	}
+	return nil, err
+}
+
+// Search for a key by name; this also searches child keyrings linked to this
+// one. The key, if found, is linked to the top keyring that Search() was called
+// from.
+func (kr *keyring) Search(name string) (*Key, error) {
+	id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0)
+	if err == nil {
+		return &Key{Name: name, id: keyID(id), ring: kr.id}, nil
+	}
+	return nil, err
+}
+
+// ID returns the 32-bit kernel identifier of a keyring
+func (kr *keyring) ID() int32 {
+	return int32(kr.id)
+}
+
+// SessionKeyring returns the current login session keyring
+func SessionKeyring() (Keyring, error) {
+	return newKeyring(unix.KEY_SPEC_SESSION_KEYRING)
+}
+
+// UserKeyring returns the keyring specific to the current user.
+func UserKeyring() (Keyring, error) {
+	return newKeyring(unix.KEY_SPEC_USER_KEYRING)
+}
+
+// Unlink an object from a keyring
+func Unlink(parent Keyring, child ID) error {
+	_, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0)
+	return err
+}
+
+// Link a key into a keyring
+func Link(parent Keyring, child ID) error {
+	_, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0)
+	return err
+}
+
+// ReadUserKeyring reads the user keyring and returns a slice of keys whose IDs (key_serial_t) are the keys linked to it.
+func ReadUserKeyring() ([]*Key, error) {
+	var (
+		b        []byte
+		err      error
+		sizeRead int
+	)
+	krSize := 4
+	size := krSize
+	b = make([]byte, size)
+	sizeRead = size + 1
+	for sizeRead > size {
+		r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size)
+		if err != nil {
+			return nil, err
+		}
+
+		if sizeRead = int(r1); sizeRead > size {
+			b = make([]byte, sizeRead)
+			size = sizeRead
+			sizeRead = size + 1
+		} else {
+			krSize = sizeRead
+		}
+	}
+	keyIDs := getKeyIDsFromByte(b[:krSize])
+	return keyIDs, err
+}
+
+func getKeyIDsFromByte(byteKeyIDs []byte) []*Key {
+	idSize := 4
+	var keys []*Key
+	for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize {
+		tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx]))
+		keys = append(keys, &Key{id: keyID(tempID)})
+	}
+	return keys
+}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
new file mode 100644
index 000000000..ae9697149
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
@@ -0,0 +1,33 @@
+// Copyright 2015 Jesse Sipprell. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
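getKeyIDsFromByte above decodes the packed key_serial_t list with an unsafe pointer cast; an equivalent decode can be written with encoding/binary. A sketch that assumes a little-endian platform, since key IDs are native-endian:

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeKeyIDs does what getKeyIDsFromByte does, but via encoding/binary;
// key_serial_t values are native-endian, so this assumes little-endian.
func decodeKeyIDs(raw []byte) []int32 {
	ids := make([]int32, 0, len(raw)/4)
	for ; len(raw) >= 4; raw = raw[4:] {
		ids = append(ids, int32(binary.LittleEndian.Uint32(raw[:4])))
	}
	return ids
}

func main() {
	fmt.Println(decodeKeyIDs([]byte{1, 0, 0, 0, 2, 0, 0, 0})) // [1 2]
}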
+
+// +build linux
+
+package keyctl
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// KeyPerm represents in-kernel access control permission to keys and keyrings
+// as a 32-bit integer broken up into four permission sets, one per byte.
+// In MSB order, the perms are: Processor, User, Group, Other.
+type KeyPerm uint32
+
+const (
+	// PermOtherAll sets all permissions for Other
+	PermOtherAll KeyPerm = 0x3f << (8 * iota)
+	// PermGroupAll sets all permissions for Group
+	PermGroupAll
+	// PermUserAll sets all permissions for User
+	PermUserAll
+	// PermProcessAll sets all permissions for Processor
+	PermProcessAll
+)
+
+// SetPerm sets the permissions on a key or keyring.
+func SetPerm(k ID, p KeyPerm) error {
+	err := unix.KeyctlSetperm(int(k.ID()), uint32(p))
+	return err
+}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
new file mode 100644
index 000000000..196c82760
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
@@ -0,0 +1,25 @@
+// Copyright 2015 Jesse Sipprell. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package keyctl
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+type keyID int32
+
+func newKeyring(id keyID) (*keyring, error) {
+	r1, err := unix.KeyctlGetKeyringID(int(id), true)
+	if err != nil {
+		return nil, err
+	}
+
+	if id < 0 {
+		r1 = int(id)
+	}
+	return &keyring{id: keyID(r1)}, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
new file mode 100644
index 000000000..8c776929c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
@@ -0,0 +1,29 @@
+package tmpdir
+
+import (
+	"os"
+	"runtime"
+)
+
+// unixTempDirForBigFiles is the directory path to store big files on non-Windows systems.
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/internal/tmpdir.unixTempDirForBigFiles=$your_path'
+var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles
+
+// builtinUnixTempDirForBigFiles is the directory path to store big files.
+// Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+// DO NOT change this, instead see unixTempDirForBigFiles above.
+const builtinUnixTempDirForBigFiles = "/var/tmp"
+
+// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
+// On non-Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp
+// which on systemd-based systems could be the unsuitable tmpfs filesystem.
+func TemporaryDirectoryForBigFiles() string {
+	var temporaryDirectoryForBigFiles string
+	if runtime.GOOS == "windows" {
+		temporaryDirectoryForBigFiles = os.TempDir()
+	} else {
+		temporaryDirectoryForBigFiles = unixTempDirForBigFiles
+	}
+	return temporaryDirectoryForBigFiles
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
new file mode 100644
index 000000000..58527d713
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
@@ -0,0 +1,316 @@
+package manifest
+
+import (
+	"encoding/json"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/types"
+	"github.com/docker/docker/api/types/versions"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
+type Schema1FSLayers struct {
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+// Schema1History is an entry of the "history" array in docker/distribution schema 1.
+type Schema1History struct {
+	V1Compatibility string `json:"v1Compatibility"`
+}
+
+// Schema1 is a manifest in docker/distribution schema 1.
+type Schema1 struct {
+	Name                     string                   `json:"name"`
+	Tag                      string                   `json:"tag"`
+	Architecture             string                   `json:"architecture"`
+	FSLayers                 []Schema1FSLayers        `json:"fsLayers"`
+	History                  []Schema1History         `json:"history"` // Keep this in sync with ExtractedV1Compatibility!
+	ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"`       // Keep this in sync with History! Does not contain the full config (Schema2V1Image)
+	SchemaVersion            int                      `json:"schemaVersion"`
+}
+
+type schema1V1CompatibilityContainerConfig struct {
+	Cmd []string
+}
+
+// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
+type Schema1V1Compatibility struct {
+	ID              string                                `json:"id"`
+	Parent          string                                `json:"parent,omitempty"`
+	Comment         string                                `json:"comment,omitempty"`
+	Created         time.Time                             `json:"created"`
+	ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"`
+	Author          string                                `json:"author,omitempty"`
+	ThrowAway       bool                                  `json:"throwaway,omitempty"`
+}
+
+// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
+// (NOTE: The instance is not necessarily a literal representation of the original blob,
+// layers with duplicate IDs are eliminated.)
+func Schema1FromManifest(manifest []byte) (*Schema1, error) {
+	s1 := Schema1{}
+	if err := json.Unmarshal(manifest, &s1); err != nil {
+		return nil, err
+	}
+	if s1.SchemaVersion != 1 {
+		return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
+	}
+	if err := s1.initialize(); err != nil {
+		return nil, err
+	}
+	if err := s1.fixManifestLayers(); err != nil {
+		return nil, err
+	}
+	return &s1, nil
+}
+
+// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
+func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
+	var name, tag string
+	if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+		name = reference.Path(ref)
+		if tagged, ok := ref.(reference.NamedTagged); ok {
+			tag = tagged.Tag()
+		}
+	}
+	s1 := Schema1{
+		Name:          name,
+		Tag:           tag,
+		Architecture:  architecture,
+		FSLayers:      fsLayers,
+		History:       history,
+		SchemaVersion: 1,
+	}
+	if err := s1.initialize(); err != nil {
+		return nil, err
+	}
+	return &s1, nil
+}
+
+// Schema1Clone creates a copy of the supplied Schema1 manifest.
+func Schema1Clone(src *Schema1) *Schema1 {
+	copy := *src
+	return &copy
+}
+
+// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
+func (m *Schema1) initialize() error {
+	if len(m.FSLayers) != len(m.History) {
+		return errors.New("length of history not equal to number of layers")
+	}
+	if len(m.FSLayers) == 0 {
+		return errors.New("no FSLayers in manifest")
+	}
+	m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
+	for i, h := range m.History {
+		if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
+			return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i)
+		}
+	}
+	return nil
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema1) ConfigInfo() types.BlobInfo {
+	return types.BlobInfo{}
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema1) LayerInfos() []LayerInfo {
+	layers := make([]LayerInfo, len(m.FSLayers))
+	for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
+		layers[(len(m.FSLayers)-1)-i] = LayerInfo{
+			BlobInfo:   types.BlobInfo{Digest: layer.BlobSum, Size: -1},
+			EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway,
+		}
+	}
+	return layers
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers).
+func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+	// Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
+	if len(m.FSLayers) != len(layerInfos) {
+		return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
+	}
+	m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
+	for i, info := range layerInfos {
+		// (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest,
+		// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
+		// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
+		m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest
+	}
+	return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *Schema1) Serialize() ([]byte, error) {
+	// docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
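Serialize, continuing below, marshals the manifest and then wraps it in the dummy JWS that docker/distribution requires. A caller-side sketch of producing such a blob from prepared components; fsLayers and history are assumed to be equal-length, non-empty slices built elsewhere, and the hard-coded architecture is a placeholder:

package example

import (
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/manifest"
)

// buildSchema1Blob assembles a schema1 manifest from prepared components and
// serializes it; Serialize adds the dummy JWS signature described above.
// "amd64" is a placeholder architecture.
func buildSchema1Blob(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History) ([]byte, error) {
	s1, err := manifest.Schema1FromComponents(ref, fsLayers, history, "amd64")
	if err != nil {
		return nil, err
	}
	return s1.Serialize()
}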
+ unsigned, err := json.Marshal(*m) + if err != nil { + return nil, err + } + return AddDummyV2S1Signature(unsigned) +} + +// fixManifestLayers, after validating the supplied manifest +// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), +// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, +// both from m.History and m.FSLayers). +// Note that even after this succeeds, m.FSLayers may contain duplicate entries +// (for Dockerfile operations which change the configuration but not the filesystem). +func (m *Schema1) fixManifestLayers() error { + // m.initialize() has verified that len(m.FSLayers) == len(m.History) + for _, compat := range m.ExtractedV1Compatibility { + if err := validateV1ID(compat.ID); err != nil { + return err + } + } + if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image") + } + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + var lastID string + for _, img := range m.ExtractedV1Compatibility { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + // backwards loop so that we keep the remaining indexes after removing items + for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { + if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) + } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { + return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) + } + } + return nil +} + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func validateV1ID(id string) error { + if ok := validHex.MatchString(id); !ok { + return errors.Errorf("image ID %q is invalid", id) + } + return nil +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + s1 := &Schema2V1Image{} + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: m.Tag, + Created: &s1.Created, + DockerVersion: s1.DockerVersion, + Architecture: s1.Architecture, + Os: s1.OS, + Layers: layerInfosToStrings(m.LayerInfos()), + } + if s1.Config != nil { + i.Labels = s1.Config.Labels + i.Env = s1.Config.Env + } + return i, nil +} + +// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs. +func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { + // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields + // that aren't directly comparable using info from the manifest. 
+ if len(m.History) == 0 { + return nil, errors.New("image has no layers") + } + s1 := Schema2V1Image{} + config := []byte(m.History[0].V1Compatibility) + err := json.Unmarshal(config, &s1) + if err != nil { + return nil, errors.Wrapf(err, "error decoding configuration") + } + // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, + // adding some fields that aren't "omitempty". + if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { + config, err = json.Marshal(&s1) + if err != nil { + return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s1) + } + } + // Build the history. + convertedHistory := []Schema2History{} + for _, compat := range m.ExtractedV1Compatibility { + hitem := Schema2History{ + Created: compat.Created, + CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), + Author: compat.Author, + Comment: compat.Comment, + EmptyLayer: compat.ThrowAway, + } + convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + } + // Build the rootfs information. We need the decompressed sums that we've been + // calculating to fill in the DiffIDs. It's expected (but not enforced by us) + // that the number of diffIDs corresponds to the number of non-EmptyLayer + // entries in the history. + rootFS := &Schema2RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + // And now for some raw manipulation. + raw := make(map[string]*json.RawMessage) + err = json.Unmarshal(config, &raw) + if err != nil { + return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1) + } + // Drop some fields. + delete(raw, "id") + delete(raw, "parent") + delete(raw, "parent_id") + delete(raw, "layer_id") + delete(raw, "throwaway") + delete(raw, "Size") + // Add the history and rootfs information. + rootfs, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + } + rawRootfs := json.RawMessage(rootfs) + raw["rootfs"] = &rawRootfs + history, err := json.Marshal(convertedHistory) + if err != nil { + return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) + } + rawHistory := json.RawMessage(history) + raw["history"] = &rawHistory + // Encode the result. + config, err = json.Marshal(raw) + if err != nil { + return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err) + } + return config, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { + image, err := m.ToSchema2Config(diffIDs) + if err != nil { + return "", err + } + return digest.FromBytes(image).Hex(), nil +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go new file mode 100644 index 000000000..d768d6e11 --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -0,0 +1,349 @@ +package manifest + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/pkg/strslice" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. 
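ImageID above derives a content-based identifier from the converted schema2 config. A hypothetical end-to-end helper: parse a raw schema1 blob, then compute the image ID from the diffIDs collected while decompressing layers (manifestBytes and diffIDs are assumed to come from elsewhere):

package example

import (
	"github.com/containers/image/v5/manifest"
	"github.com/opencontainers/go-digest"
)

// imageIDFromSchema1 parses a schema1 blob and computes the content-derived
// image ID defined by ImageID above.
func imageIDFromSchema1(manifestBytes []byte, diffIDs []digest.Digest) (string, error) {
	s1, err := manifest.Schema1FromManifest(manifestBytes)
	if err != nil {
		return "", err
	}
	return s1.ImageID(diffIDs)
}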
+type Schema2Descriptor struct {
+	MediaType string        `json:"mediaType"`
+	Size      int64         `json:"size"`
+	Digest    digest.Digest `json:"digest"`
+	URLs      []string      `json:"urls,omitempty"`
+}
+
+// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor.
+func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo {
+	return types.BlobInfo{
+		Digest:    desc.Digest,
+		Size:      desc.Size,
+		URLs:      desc.URLs,
+		MediaType: desc.MediaType,
+	}
+}
+
+// Schema2 is a manifest in docker/distribution schema 2.
+type Schema2 struct {
+	SchemaVersion     int                 `json:"schemaVersion"`
+	MediaType         string              `json:"mediaType"`
+	ConfigDescriptor  Schema2Descriptor   `json:"config"`
+	LayersDescriptors []Schema2Descriptor `json:"layers"`
+}
+
+// Schema2Port is a Port, a string containing port number and protocol in the
+// format "80/tcp", from docker/go-connections/nat.
+type Schema2Port string
+
+// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from
+// docker/go-connections/nat.
+type Schema2PortSet map[Schema2Port]struct{}
+
+// Schema2HealthConfig is a HealthConfig, which holds configuration settings
+// for the HEALTHCHECK feature, from docker/docker/api/types/container.
+type Schema2HealthConfig struct {
+	// Test is the test to perform to check that the container is healthy.
+	// An empty slice means to inherit the default.
+	// The options are:
+	// {} : inherit healthcheck
+	// {"NONE"} : disable healthcheck
+	// {"CMD", args...} : exec arguments directly
+	// {"CMD-SHELL", command} : run command with system's default shell
+	Test []string `json:",omitempty"`
+
+	// Zero means to inherit. Durations are expressed as integer nanoseconds.
+	StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
+	Interval    time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+	Timeout     time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+
+	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
+	// Zero means inherit.
+	Retries int `json:",omitempty"`
+}
+
+// Schema2Config is a Config in docker/docker/api/types/container.
+type Schema2Config struct {
+	Hostname        string               // Hostname
+	Domainname      string               // Domainname
+	User            string               // User that will run the command(s) inside the container, also support user:group
+	AttachStdin     bool                 // Attach the standard input, makes possible user interaction
+	AttachStdout    bool                 // Attach the standard output
+	AttachStderr    bool                 // Attach the standard error
+	ExposedPorts    Schema2PortSet       `json:",omitempty"` // List of exposed ports
+	Tty             bool                 // Attach standard streams to a tty, including stdin if it is not closed.
+	OpenStdin       bool                 // Open stdin
+	StdinOnce       bool                 // If true, close stdin after the first attached client disconnects.
+	Env             []string             // List of environment variables to set in the container
+	Cmd             strslice.StrSlice    // Command to run when starting the container
+	Healthcheck     *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+	ArgsEscaped     bool                 `json:",omitempty"` // True if command is already escaped (Windows specific)
+	Image           string               // Name of the image as it was passed by the operator (e.g. could be symbolic)
+	Volumes         map[string]struct{}  // List of volumes (mounts) used for the container
+	WorkingDir      string               // Current directory (PWD) in which the command will be launched
+	Entrypoint      strslice.StrSlice    // Entrypoint to run when starting the container
+	NetworkDisabled bool                 `json:",omitempty"` // Is network disabled
+	MacAddress      string               `json:",omitempty"` // Mac Address of the container
+	OnBuild         []string             // ONBUILD metadata that were defined on the image Dockerfile
+	Labels          map[string]string    // List of labels set to this container
+	StopSignal      string               `json:",omitempty"` // Signal to stop a container
+	StopTimeout     *int                 `json:",omitempty"` // Timeout (in seconds) to stop a container
+	Shell           strslice.StrSlice    `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
+
+// Schema2V1Image is a V1Image in docker/docker/image.
+type Schema2V1Image struct {
+	// ID is a unique 64 character identifier of the image
+	ID string `json:"id,omitempty"`
+	// Parent is the ID of the parent image
+	Parent string `json:"parent,omitempty"`
+	// Comment is the commit message that was set when committing the image
+	Comment string `json:"comment,omitempty"`
+	// Created is the timestamp at which the image was created
+	Created time.Time `json:"created"`
+	// Container is the id of the container used to commit
+	Container string `json:"container,omitempty"`
+	// ContainerConfig is the configuration of the container that is committed into the image
+	ContainerConfig Schema2Config `json:"container_config,omitempty"`
+	// DockerVersion specifies the version of Docker that was used to build the image
+	DockerVersion string `json:"docker_version,omitempty"`
+	// Author is the name of the author that was specified when committing the image
+	Author string `json:"author,omitempty"`
+	// Config is the configuration of the container received from the client
+	Config *Schema2Config `json:"config,omitempty"`
+	// Architecture is the hardware that the image is built and runs on
+	Architecture string `json:"architecture,omitempty"`
+	// OS is the operating system used to build and run the image
+	OS string `json:"os,omitempty"`
+	// Size is the total size of the image including all layers it is composed of
+	Size int64 `json:",omitempty"`
+}
+
+// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image.
+type Schema2RootFS struct {
+	Type    string          `json:"type"`
+	DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+}
+
+// Schema2History stores build commands that were used to create an image, from docker/docker/image.
+type Schema2History struct {
+	// Created is the timestamp at which the image was created
+	Created time.Time `json:"created"`
+	// Author is the name of the author that was specified when committing the image
+	Author string `json:"author,omitempty"`
+	// CreatedBy keeps the Dockerfile command used while building the image
+	CreatedBy string `json:"created_by,omitempty"`
+	// Comment is the commit message that was set when committing the image
+	Comment string `json:"comment,omitempty"`
+	// EmptyLayer is set to true if this history item did not generate a
+	// layer. Otherwise, the history item is associated with the next
+	// layer in the RootFS section.
+	EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Schema2Image is an Image in docker/docker/image.
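The EmptyLayer flag is what ties Schema2History to Schema2RootFS: only non-empty history entries consume a rootfs diffID, the invariant ToSchema2Config in docker_schema1.go relies on. A sketch of that pairing:

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/opencontainers/go-digest"
)

// diffIDForHistory returns the rootfs diffID paired with history entry i,
// or "" for empty layers; it mirrors the invariant that non-empty history
// entries and diffIDs line up one-to-one.
func diffIDForHistory(history []manifest.Schema2History, diffIDs []digest.Digest, i int) digest.Digest {
	if history[i].EmptyLayer {
		return "" // empty layers consume no rootfs entry
	}
	n := 0 // count the non-empty entries before i
	for _, h := range history[:i] {
		if !h.EmptyLayer {
			n++
		}
	}
	if n < len(diffIDs) {
		return diffIDs[n]
	}
	return "" // fewer diffIDs than non-empty layers: malformed config
}

func main() {
	history := []manifest.Schema2History{{}, {EmptyLayer: true}, {}}
	diffIDs := []digest.Digest{"sha256:aaa", "sha256:bbb"}
	fmt.Println(diffIDForHistory(history, diffIDs, 2)) // prints sha256:bbb
}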
+type Schema2Image struct {
+	Schema2V1Image
+	Parent     digest.Digest    `json:"parent,omitempty"`
+	RootFS     *Schema2RootFS   `json:"rootfs,omitempty"`
+	History    []Schema2History `json:"history,omitempty"`
+	OSVersion  string           `json:"os.version,omitempty"`
+	OSFeatures []string         `json:"os.features,omitempty"`
+}
+
+// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
+func Schema2FromManifest(manifest []byte) (*Schema2, error) {
+	s2 := Schema2{}
+	if err := json.Unmarshal(manifest, &s2); err != nil {
+		return nil, err
+	}
+	// Check manifest's and layers' media types.
+	if err := SupportedSchema2MediaType(s2.MediaType); err != nil {
+		return nil, err
+	}
+	for _, layer := range s2.LayersDescriptors {
+		if err := SupportedSchema2MediaType(layer.MediaType); err != nil {
+			return nil, err
+		}
+	}
+	return &s2, nil
+}
+
+// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
+	return &Schema2{
+		SchemaVersion:     2,
+		MediaType:         DockerV2Schema2MediaType,
+		ConfigDescriptor:  config,
+		LayersDescriptors: layers,
+	}
+}
+
+// Schema2Clone creates a copy of the supplied Schema2 manifest.
+func Schema2Clone(src *Schema2) *Schema2 {
+	copy := *src
+	return &copy
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema2) ConfigInfo() types.BlobInfo {
+	return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema2) LayerInfos() []LayerInfo {
+	blobs := []LayerInfo{}
+	for _, layer := range m.LayersDescriptors {
+		blobs = append(blobs, LayerInfo{
+			BlobInfo:   BlobInfoFromSchema2Descriptor(layer),
+			EmptyLayer: false,
+		})
+	}
+	return blobs
+}
+
+// isSchema2ForeignLayer is a convenience wrapper to check if a given mime type
+// is a compressed or decompressed schema 2 foreign layer.
+func isSchema2ForeignLayer(mimeType string) bool {
+	switch mimeType {
+	case DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip:
+		return true
+	default:
+		return false
+	}
+}
+
+// isSchema2Layer is a convenience wrapper to check if a given mime type is a
+// compressed or decompressed schema 2 layer.
+func isSchema2Layer(mimeType string) bool {
+	switch mimeType {
+	case DockerV2SchemaLayerMediaTypeUncompressed, DockerV2Schema2LayerMediaType:
+		return true
+	default:
+		return false
+	}
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+	if len(m.LayersDescriptors) != len(layerInfos) {
+		return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
+	}
+	original := m.LayersDescriptors
+	m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
+	for i, info := range layerInfos {
+		// First make sure we support the media type of the original layer.
+		if err := SupportedSchema2MediaType(original[i].MediaType); err != nil {
+			return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType)
+		}
+
+		// Set the correct media types based on the specified compression
+		// operation, the desired compression algorithm AND the original media
+		// type.
+		//
+		// Note that manifests in containers-storage might be reporting the
+		// wrong media type since the original manifests are stored while layers
+		// are decompressed in storage. Hence, we need to consider the case
+		// that an already {de}compressed layer should be {de}compressed, which
+		// is being addressed in `isSchema2{Foreign}Layer`.
+		switch info.CompressionOperation {
+		case types.PreserveOriginal:
+			// Keep the original media type.
+			m.LayersDescriptors[i].MediaType = original[i].MediaType
+
+		case types.Decompress:
+			// Decompress the original media type and check whether it was a
+			// non-distributable one.
+			mimeType := original[i].MediaType
+			switch {
+			case isSchema2ForeignLayer(mimeType):
+				m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaType
+			case isSchema2Layer(mimeType):
+				m.LayersDescriptors[i].MediaType = DockerV2SchemaLayerMediaTypeUncompressed
+			default:
+				return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType)
+			}
+
+		case types.Compress:
+			if info.CompressionAlgorithm == nil {
+				logrus.Debugf("Preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest)
+				m.LayersDescriptors[i].MediaType = original[i].MediaType
+				break
+			}
+			// Compress the original media type and set the new one based on
+			// that type (distributable or not) and the specified compression
+			// algorithm. Return an error if the algorithm is not supported.
+			switch info.CompressionAlgorithm.Name() {
+			case compression.Gzip.Name():
+				mimeType := original[i].MediaType
+				switch {
+				case isSchema2ForeignLayer(mimeType):
+					m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaTypeGzip
+				case isSchema2Layer(mimeType):
+					m.LayersDescriptors[i].MediaType = DockerV2Schema2LayerMediaType
+				default:
+					return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType)
+				}
+			case compression.Zstd.Name():
+				return fmt.Errorf("Error preparing updated manifest: zstd compression is not supported for docker images")
+			default:
+				return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest)
+			}
+
+		default:
+			return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest)
+		}
+		m.LayersDescriptors[i].Digest = info.Digest
+		m.LayersDescriptors[i].Size = info.Size
+		m.LayersDescriptors[i].URLs = info.URLs
+	}
+	return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *Schema2) Serialize() ([]byte, error) {
+	return json.Marshal(*m)
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
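+//
+// A minimal usage sketch (configBlob is a hypothetical variable assumed to
+// already hold the raw config JSON; it is not part of this package):
+//
+//	info, err := m.Inspect(func(types.BlobInfo) ([]byte, error) {
+//		return configBlob, nil
+//	})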
+func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+	config, err := configGetter(m.ConfigInfo())
+	if err != nil {
+		return nil, err
+	}
+	s2 := &Schema2Image{}
+	if err := json.Unmarshal(config, s2); err != nil {
+		return nil, err
+	}
+	i := &types.ImageInspectInfo{
+		Tag:           "",
+		Created:       &s2.Created,
+		DockerVersion: s2.DockerVersion,
+		Architecture:  s2.Architecture,
+		Os:            s2.OS,
+		Layers:        layerInfosToStrings(m.LayerInfos()),
+	}
+	if s2.Config != nil {
+		i.Labels = s2.Config.Labels
+		i.Env = s2.Config.Env
+	}
+	return i, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *Schema2) ImageID([]digest.Digest) (string, error) {
+	if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
+		return "", err
+	}
+	return m.ConfigDescriptor.Digest.Hex(), nil
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
new file mode 100644
index 000000000..453976c48
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
@@ -0,0 +1,216 @@
+package manifest
+
+import (
+	"encoding/json"
+	"fmt"
+	"runtime"
+
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// Schema2PlatformSpec describes the platform which a particular manifest is
+// specialized for.
+type Schema2PlatformSpec struct {
+	Architecture string   `json:"architecture"`
+	OS           string   `json:"os"`
+	OSVersion    string   `json:"os.version,omitempty"`
+	OSFeatures   []string `json:"os.features,omitempty"`
+	Variant      string   `json:"variant,omitempty"`
+	Features     []string `json:"features,omitempty"` // removed in OCI
+}
+
+// Schema2ManifestDescriptor references a platform-specific manifest.
+type Schema2ManifestDescriptor struct {
+	Schema2Descriptor
+	Platform Schema2PlatformSpec `json:"platform"`
+}
+
+// Schema2List is a list of platform-specific manifests.
+type Schema2List struct {
+	SchemaVersion int                         `json:"schemaVersion"`
+	MediaType     string                      `json:"mediaType"`
+	Manifests     []Schema2ManifestDescriptor `json:"manifests"`
+}
+
+// MIMEType returns the MIME type of this particular manifest list.
+func (list *Schema2List) MIMEType() string {
+	return list.MediaType
+}
+
+// Instances returns a slice of digests of the manifests that this list knows of.
+func (list *Schema2List) Instances() []digest.Digest {
+	results := make([]digest.Digest, len(list.Manifests))
+	for i, m := range list.Manifests {
+		results[i] = m.Digest
+	}
+	return results
+}
+
+// Instance returns the ListUpdate of a particular instance in the list.
+func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
+	for _, manifest := range list.Manifests {
+		if manifest.Digest == instanceDigest {
+			return ListUpdate{
+				Digest:    manifest.Digest,
+				Size:      manifest.Size,
+				MediaType: manifest.MediaType,
+			}, nil
+		}
+	}
+	return ListUpdate{}, errors.Errorf("unable to find instance %s passed to Schema2List.Instance", instanceDigest)
+}
+
+// UpdateInstances updates the sizes, digests, and media types of the manifests
+// which the list catalogs.
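+//
+// A sketch of a valid update (the values are illustrative only): the slice
+// must be exactly as long as list.Manifests, and every field must be set.
+//
+//	err := list.UpdateInstances([]ListUpdate{{
+//		Digest:    newDigest, // assumed to be a valid digest.Digest
+//		Size:      1234,
+//		MediaType: DockerV2Schema2MediaType,
+//	}})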
+func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
+	if len(updates) != len(list.Manifests) {
+		return errors.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
+	}
+	for i := range updates {
+		if err := updates[i].Digest.Validate(); err != nil {
+			return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest", i+1, len(updates))
+		}
+		list.Manifests[i].Digest = updates[i].Digest
+		if updates[i].Size < 0 {
+			return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
+		}
+		list.Manifests[i].Size = updates[i].Size
+		if updates[i].MediaType == "" {
+			return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
+		}
+		if err := SupportedSchema2MediaType(updates[i].MediaType); err != nil && SupportedOCI1MediaType(updates[i].MediaType) != nil {
+			return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances had an unsupported media type (was %q): %q", i+1, len(updates), list.Manifests[i].MediaType, updates[i].MediaType)
+		}
+		list.Manifests[i].MediaType = updates[i].MediaType
+	}
+	return nil
+}
+
+// ChooseInstance returns the digest of the manifest in the list which is
+// appropriate for the current environment.
+func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
+	wantedArch := runtime.GOARCH
+	if ctx != nil && ctx.ArchitectureChoice != "" {
+		wantedArch = ctx.ArchitectureChoice
+	}
+	wantedOS := runtime.GOOS
+	if ctx != nil && ctx.OSChoice != "" {
+		wantedOS = ctx.OSChoice
+	}
+
+	for _, d := range list.Manifests {
+		if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS {
+			return d.Digest, nil
+		}
+	}
+	return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS)
+}
+
+// Serialize returns the list in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (list *Schema2List) Serialize() ([]byte, error) {
+	buf, err := json.Marshal(list)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error marshaling Schema2List %#v", list)
+	}
+	return buf, nil
+}
+
+// Schema2ListFromComponents creates a Schema2 manifest list instance from the
+// supplied data.
+func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List {
+	list := Schema2List{
+		SchemaVersion: 2,
+		MediaType:     DockerV2ListMediaType,
+		Manifests:     make([]Schema2ManifestDescriptor, len(components)),
+	}
+	for i, component := range components {
+		m := Schema2ManifestDescriptor{
+			Schema2Descriptor{
+				MediaType: component.MediaType,
+				Size:      component.Size,
+				Digest:    component.Digest,
+				URLs:      dupStringSlice(component.URLs),
+			},
+			Schema2PlatformSpec{
+				Architecture: component.Platform.Architecture,
+				OS:           component.Platform.OS,
+				OSVersion:    component.Platform.OSVersion,
+				OSFeatures:   dupStringSlice(component.Platform.OSFeatures),
+				Variant:      component.Platform.Variant,
+				Features:     dupStringSlice(component.Platform.Features),
+			},
+		}
+		list.Manifests[i] = m
+	}
+	return &list
+}
+
+// Schema2ListClone creates a deep copy of the passed-in list.
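+//
+// Because the copy is deep, mutating the clone leaves the original untouched;
+// a sketch (dup and orig are illustrative names):
+//
+//	dup := Schema2ListClone(orig)
+//	dup.Manifests[0].Size = 0 // orig.Manifests[0].Size is unchanged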
+func Schema2ListClone(list *Schema2List) *Schema2List { + return Schema2ListFromComponents(list.Manifests) +} + +// ToOCI1Index returns the list encoded as an OCI1 index. +func (list *Schema2List) ToOCI1Index() (*OCI1Index, error) { + components := make([]imgspecv1.Descriptor, 0, len(list.Manifests)) + for _, manifest := range list.Manifests { + converted := imgspecv1.Descriptor{ + MediaType: manifest.MediaType, + Size: manifest.Size, + Digest: manifest.Digest, + URLs: dupStringSlice(manifest.URLs), + Platform: &imgspecv1.Platform{ + OS: manifest.Platform.OS, + Architecture: manifest.Platform.Architecture, + OSFeatures: dupStringSlice(manifest.Platform.OSFeatures), + OSVersion: manifest.Platform.OSVersion, + Variant: manifest.Platform.Variant, + }, + } + components = append(components, converted) + } + oci := OCI1IndexFromComponents(components, nil) + return oci, nil +} + +// ToSchema2List returns the list encoded as a Schema2 list. +func (list *Schema2List) ToSchema2List() (*Schema2List, error) { + return Schema2ListClone(list), nil +} + +// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled +// JSON, presumably generated by encoding a Schema2 manifest list. +func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) { + list := Schema2List{ + Manifests: []Schema2ManifestDescriptor{}, + } + if err := json.Unmarshal(manifest, &list); err != nil { + return nil, errors.Wrapf(err, "error unmarshaling Schema2List %q", string(manifest)) + } + return &list, nil +} + +// Clone returns a deep copy of this list and its contents. +func (list *Schema2List) Clone() List { + return Schema2ListClone(list) +} + +// ConvertToMIMEType converts the passed-in manifest list to a manifest +// list of the specified type. +func (list *Schema2List) ConvertToMIMEType(manifestMIMEType string) (List, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return list.Clone(), nil + case imgspecv1.MediaTypeImageIndex: + return list.ToOCI1Index() + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType) + } +} diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go new file mode 100644 index 000000000..6d10430fd --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/list.go @@ -0,0 +1,106 @@ +package manifest + +import ( + "fmt" + + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + // SupportedListMIMETypes is a list of the manifest list types that we know how to + // read/manipulate/write. + SupportedListMIMETypes = []string{ + DockerV2ListMediaType, + imgspecv1.MediaTypeImageIndex, + } +) + +// List is an interface for parsing, modifying lists of image manifests. +// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members +// directly. 
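+//
+// A minimal sketch of the abstract usage (blob and mimeType are assumed to
+// have been obtained elsewhere, e.g. from an ImageSource):
+//
+//	list, err := ListFromBlob(blob, mimeType)
+//	if err != nil {
+//		return err
+//	}
+//	instanceDigest, err := list.ChooseInstance(nil) // nil selects the current platform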
+type List interface {
+	// MIMEType returns the MIME type of this particular manifest list.
+	MIMEType() string
+
+	// Instances returns a list of the manifests that this list knows of, other than its own.
+	Instances() []digest.Digest
+
+	// Update information about the list's instances. The length of the passed-in slice must
+	// match the length of the list of instances which the list already contains, and every field
+	// must be specified.
+	UpdateInstances([]ListUpdate) error
+
+	// Instance returns the size and MIME type of a particular instance in the list.
+	Instance(digest.Digest) (ListUpdate, error)
+
+	// ChooseInstance selects which manifest is most appropriate for the platform described by the
+	// SystemContext, or for the current platform if the SystemContext doesn't specify any details.
+	ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
+
+	// Serialize returns the list in a blob format.
+	// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
+	// from one, even if no modifications were made!
+	Serialize() ([]byte, error)
+
+	// ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
+	ConvertToMIMEType(mimeType string) (List, error)
+
+	// Clone returns a deep copy of this list and its contents.
+	Clone() List
+}
+
+// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
+type ListUpdate struct {
+	Digest    digest.Digest
+	Size      int64
+	MediaType string
+}
+
+// dupStringSlice returns a deep copy of a slice of strings, or nil if the
+// source slice is empty.
+func dupStringSlice(list []string) []string {
+	if len(list) == 0 {
+		return nil
+	}
+	dup := make([]string, len(list))
+	for i := range list {
+		dup[i] = list[i]
+	}
+	return dup
+}
+
+// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
+// passed-in map is nil or has no keys.
+func dupStringStringMap(m map[string]string) map[string]string {
+	if len(m) == 0 {
+		return nil
+	}
+	result := make(map[string]string)
+	for k, v := range m {
+		result[k] = v
+	}
+	return result
+}
+
+// ListFromBlob parses a list of manifests.
+func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
+	normalized := NormalizedMIMEType(manifestMIMEType)
+	switch normalized {
+	case DockerV2ListMediaType:
+		return Schema2ListFromManifest(manifest)
+	case imgspecv1.MediaTypeImageIndex:
+		return OCI1IndexFromManifest(manifest)
+	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+		return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
+	}
+	return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
+}
+
+// ConvertListToMIMEType converts the passed-in manifest list to a manifest
+// list of the specified type.
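+//
+// It simply delegates to the list's own ConvertToMIMEType; a sketch:
+//
+//	ociIndex, err := ConvertListToMIMEType(list, imgspecv1.MediaTypeImageIndex)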
+func ConvertListToMIMEType(list List, manifestMIMEType string) (List, error) {
+	return list.ConvertToMIMEType(manifestMIMEType)
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go
new file mode 100644
index 000000000..5b4d341d8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/manifest.go
@@ -0,0 +1,263 @@
+package manifest
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/containers/image/v5/types"
+	"github.com/containers/libtrust"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
+
+// FIXME(runcom, mitr): should we have a mediatype pkg??
+const (
+	// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
+	DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+	// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+	DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+	// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
+	DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+	// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
+	DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+	// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
+	DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+	// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
+	DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
+	// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+	DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+	// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+	DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
+	// DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
+	DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+)
+
+// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
+func SupportedSchema2MediaType(m string) error {
+	switch m {
+	case DockerV2ListMediaType, DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema2ConfigMediaType, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2LayerMediaType, DockerV2Schema2MediaType, DockerV2SchemaLayerMediaTypeUncompressed:
+		return nil
+	default:
+		return fmt.Errorf("unsupported docker v2s2 media type: %q", m)
+	}
+}
+
+// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource
+// should request from the backend unless directed otherwise.
+var DefaultRequestedManifestMIMETypes = []string{
+	imgspecv1.MediaTypeImageManifest,
+	DockerV2Schema2MediaType,
+	DockerV2Schema1SignedMediaType,
+	DockerV2Schema1MediaType,
+	DockerV2ListMediaType,
+	imgspecv1.MediaTypeImageIndex,
+}
+
+// Manifest is an interface for parsing, modifying image manifests in isolation.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members
+// directly.
+//
+// See types.Image for functionality not limited to manifests, including format conversions and config parsing.
+// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image.
+type Manifest interface {
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	ConfigInfo() types.BlobInfo
+	// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// The Digest field is guaranteed to be provided; Size may be -1.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() []LayerInfo
+	// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+	UpdateLayerInfos(layerInfos []types.BlobInfo) error
+
+	// ImageID computes an ID which can uniquely identify this image by its contents, irrespective
+	// of which (of possibly more than one simultaneously valid) reference was used to locate the
+	// image, and unchanged by whether or how the layers are compressed. The result takes the form
+	// of the hexadecimal portion of a digest.Digest.
+	ImageID(diffIDs []digest.Digest) (string, error)
+
+	// Inspect returns various information for (skopeo inspect) parsed from the manifest,
+	// incorporating information from a configuration blob returned by configGetter, if
+	// the underlying image format is expected to include a configuration blob.
+	Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error)
+
+	// Serialize returns the manifest in a blob format.
+	// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+	Serialize() ([]byte, error)
+}
+
+// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos.
+type LayerInfo struct {
+	types.BlobInfo
+	EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
+}
+
+// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
+// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
+// but we may not have such metadata available (e.g. when the manifest is a local file).
+func GuessMIMEType(manifest []byte) string {
+	// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
+	// Also docker/distribution/manifest.Versioned.
+	meta := struct {
+		MediaType     string      `json:"mediaType"`
+		SchemaVersion int         `json:"schemaVersion"`
+		Signatures    interface{} `json:"signatures"`
+	}{}
+	if err := json.Unmarshal(manifest, &meta); err != nil {
+		return ""
+	}
+
+	switch meta.MediaType {
+	case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type.
+		return meta.MediaType
+	}
+	// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures, i.e. computing the correct manifest digest.
+	switch meta.SchemaVersion {
+	case 1:
+		if meta.Signatures != nil {
+			return DockerV2Schema1SignedMediaType
+		}
+		return DockerV2Schema1MediaType
+	case 2:
+		// best effort to understand if this is an OCI image since mediaType
+		// isn't in the manifest for OCI anymore
+		// for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
+		ociMan := struct {
+			Config struct {
+				MediaType string `json:"mediaType"`
+			} `json:"config"`
+		}{}
+		if err := json.Unmarshal(manifest, &ociMan); err != nil {
+			return ""
+		}
+		if ociMan.Config.MediaType == imgspecv1.MediaTypeImageConfig {
+			return imgspecv1.MediaTypeImageManifest
+		}
+		ociIndex := struct {
+			Manifests []imgspecv1.Descriptor `json:"manifests"`
+		}{}
+		if err := json.Unmarshal(manifest, &ociIndex); err != nil {
+			return ""
+		}
+		if len(ociIndex.Manifests) != 0 {
+			if ociMan.Config.MediaType == "" {
+				return imgspecv1.MediaTypeImageIndex
+			}
+			return ociMan.Config.MediaType
+		}
+		return DockerV2Schema2MediaType
+	}
+	return ""
+}
+
+// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+func Digest(manifest []byte) (digest.Digest, error) {
+	if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
+		sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
+		if err != nil {
+			return "", err
+		}
+		manifest, err = sig.Payload()
+		if err != nil {
+			// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
+			// that libtrust itself has josebase64UrlEncode()d
+			return "", err
+		}
+	}
+
+	return digest.FromBytes(manifest), nil
+}
+
+// MatchesDigest returns true iff the manifest matches expectedDigest.
+// Error may be set if this returns false.
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
+// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
+func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
+	// This should eventually support various digest types.
+	actualDigest, err := Digest(manifest)
+	if err != nil {
+		return false, err
+	}
+	return expectedDigest == actualDigest, nil
+}
+
+// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
+// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature).
+func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		return nil, err // Coverage: This can fail only if rand.Reader fails.
+	}
+
+	js, err := libtrust.NewJSONSignature(manifest)
+	if err != nil {
+		return nil, err
+	}
+	if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails.
+		return nil, err
+	}
+	return js.PrettySignature("signatures")
+}
+
+// MIMETypeIsMultiImage returns true if mimeType is a list of images
+func MIMETypeIsMultiImage(mimeType string) bool {
+	return mimeType == DockerV2ListMediaType || mimeType == imgspecv1.MediaTypeImageIndex
+}
+
+// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
+// centralizing various workarounds.
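+//
+// For example, per the cases below:
+//
+//	NormalizedMIMEType("application/json")       // DockerV2Schema1SignedMediaType
+//	NormalizedMIMEType(DockerV2Schema2MediaType) // returned unchanged
+//	NormalizedMIMEType("text/plain")             // unrecognized: falls back to DockerV2Schema1SignedMediaType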
+func NormalizedMIMEType(input string) string { + switch input { + // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . + // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might + // need to happen within the ImageSource. + case "application/json": + return DockerV2Schema1SignedMediaType + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + DockerV2Schema2MediaType, + DockerV2ListMediaType: + return input + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return DockerV2Schema1SignedMediaType + } +} + +// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type +func FromBlob(manblob []byte, mt string) (Manifest, error) { + nmt := NormalizedMIMEType(mt) + switch nmt { + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: + return Schema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return OCI1FromManifest(manblob) + case DockerV2Schema2MediaType: + return Schema2FromManifest(manblob) + case DockerV2ListMediaType, imgspecv1.MediaTypeImageIndex: + return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") + } + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt) +} + +// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() +// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. +func layerInfosToStrings(infos []LayerInfo) []string { + layers := make([]string, len(infos)) + for i, info := range infos { + layers[i] = info.Digest.String() + } + return layers +} diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go new file mode 100644 index 000000000..46c551b18 --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -0,0 +1,243 @@ +package manifest + +import ( + "encoding/json" + "fmt" + + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. 
+func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo { + return types.BlobInfo{ + Digest: desc.Digest, + Size: desc.Size, + URLs: desc.URLs, + Annotations: desc.Annotations, + MediaType: desc.MediaType, + } +} + +// OCI1 is a manifest.Manifest implementation for OCI images. +// The underlying data from imgspecv1.Manifest is also available. +type OCI1 struct { + imgspecv1.Manifest +} + +// SupportedOCI1MediaType checks if the specified string is a supported OCI1 media type. +func SupportedOCI1MediaType(m string) error { + switch m { + case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader: + return nil + default: + return fmt.Errorf("unsupported OCIv1 media type: %q", m) + } +} + +// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. +func OCI1FromManifest(manifest []byte) (*OCI1, error) { + oci1 := OCI1{} + if err := json.Unmarshal(manifest, &oci1); err != nil { + return nil, err + } + // Check manifest's and layers' media types. + if err := SupportedOCI1MediaType(oci1.Config.MediaType); err != nil { + return nil, err + } + for _, layer := range oci1.Layers { + if err := SupportedOCI1MediaType(layer.MediaType); err != nil { + return nil, err + } + } + return &oci1, nil +} + +// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. +func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { + return &OCI1{ + imgspecv1.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + Config: config, + Layers: layers, + }, + } +} + +// OCI1Clone creates a copy of the supplied OCI1 manifest. +func OCI1Clone(src *OCI1) *OCI1 { + return &OCI1{ + Manifest: src.Manifest, + } +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *OCI1) ConfigInfo() types.BlobInfo { + return BlobInfoFromOCI1Descriptor(m.Config) +} + +// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *OCI1) LayerInfos() []LayerInfo { + blobs := []LayerInfo{} + for _, layer := range m.Layers { + blobs = append(blobs, LayerInfo{ + BlobInfo: BlobInfoFromOCI1Descriptor(layer), + EmptyLayer: false, + }) + } + return blobs +} + +// isOCI1NonDistributableLayer is a convenience wrapper to check if a given mime +// type is a compressed or decompressed OCI v1 non-distributable layer. +func isOCI1NonDistributableLayer(mimeType string) bool { + switch mimeType { + case imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd: + return true + default: + return false + } +} + +// isOCI1Layer is a convenience wrapper to check if a given mime type is a +// compressed or decompressed OCI v1 layer. 
+func isOCI1Layer(mimeType string) bool {
+	switch mimeType {
+	case imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd:
+		return true
+	default:
+		return false
+	}
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+	if len(m.Layers) != len(layerInfos) {
+		return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
+	}
+	original := m.Layers
+	m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
+	for i, info := range layerInfos {
+		// First make sure we support the media type of the original layer.
+		if err := SupportedOCI1MediaType(original[i].MediaType); err != nil {
+			return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType)
+		}
+
+		// Set the correct media types based on the specified compression
+		// operation, the desired compression algorithm AND the original media
+		// type.
+		//
+		// Note that manifests in containers-storage might be reporting the
+		// wrong media type since the original manifests are stored while layers
+		// are decompressed in storage. Hence, we need to consider the case
+		// that an already {de}compressed layer should be {de}compressed, which
+		// is being addressed in `isOCI1{NonDistributable}Layer`.
+		switch info.CompressionOperation {
+		case types.PreserveOriginal:
+			// Keep the original media type.
+			m.Layers[i].MediaType = original[i].MediaType
+
+		case types.Decompress:
+			// Decompress the original media type and check whether it was a
+			// non-distributable one.
+			mimeType := original[i].MediaType
+			switch {
+			case isOCI1NonDistributableLayer(mimeType):
+				m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+			case isOCI1Layer(mimeType):
+				m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer
+			default:
+				return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType)
+			}
+
+		case types.Compress:
+			if info.CompressionAlgorithm == nil {
+				logrus.Debugf("Preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest)
+				m.Layers[i].MediaType = original[i].MediaType
+				break
+			}
+			// Compress the original media type and set the new one based on
+			// that type (distributable or not) and the specified compression
+			// algorithm. Return an error if the algorithm is not supported.
+			mimeType := original[i].MediaType
+			switch info.CompressionAlgorithm.Name() {
+			case compression.Gzip.Name():
+				switch {
+				case isOCI1NonDistributableLayer(mimeType):
+					m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
+				case isOCI1Layer(mimeType):
+					m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerGzip
+				default:
+					return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType)
+				}
+
+			case compression.Zstd.Name():
+				switch {
+				case isOCI1NonDistributableLayer(mimeType):
+					m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableZstd
+				case isOCI1Layer(mimeType):
+					m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerZstd
+				default:
+					return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType)
+				}
+
+			default:
+				return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest)
+			}
+
+		default:
+			return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest)
+		}
+		m.Layers[i].Digest = info.Digest
+		m.Layers[i].Size = info.Size
+		m.Layers[i].Annotations = info.Annotations
+		m.Layers[i].URLs = info.URLs
+	}
+	return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *OCI1) Serialize() ([]byte, error) {
+	return json.Marshal(*m)
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+	config, err := configGetter(m.ConfigInfo())
+	if err != nil {
+		return nil, err
+	}
+	v1 := &imgspecv1.Image{}
+	if err := json.Unmarshal(config, v1); err != nil {
+		return nil, err
+	}
+	d1 := &Schema2V1Image{}
+	// Best effort: the Docker-specific fields are optional in an OCI config,
+	// so a failure to parse them is deliberately ignored.
+	json.Unmarshal(config, d1)
+	i := &types.ImageInspectInfo{
+		Tag:           "",
+		Created:       v1.Created,
+		DockerVersion: d1.DockerVersion,
+		Labels:        v1.Config.Labels,
+		Architecture:  v1.Architecture,
+		Os:            v1.OS,
+		Layers:        layerInfosToStrings(m.LayerInfos()),
+		Env:           d1.Config.Env,
+	}
+	return i, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *OCI1) ImageID([]digest.Digest) (string, error) {
+	if err := m.Config.Digest.Validate(); err != nil {
+		return "", err
+	}
+	return m.Config.Digest.Hex(), nil
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go
new file mode 100644
index 000000000..816503ce5
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go
@@ -0,0 +1,221 @@
+package manifest
+
+import (
+	"encoding/json"
+	"fmt"
+	"runtime"
+
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	imgspec "github.com/opencontainers/image-spec/specs-go"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// OCI1Index is just an alias for the OCI index type, but one which we can
+// provide methods for.
+type OCI1Index struct {
+	imgspecv1.Index
+}
+
+// MIMEType returns the MIME type of this particular manifest index.
+func (index *OCI1Index) MIMEType() string {
+	return imgspecv1.MediaTypeImageIndex
+}
+
+// Instances returns a slice of digests of the manifests that this index knows of.
+func (index *OCI1Index) Instances() []digest.Digest {
+	results := make([]digest.Digest, len(index.Manifests))
+	for i, m := range index.Manifests {
+		results[i] = m.Digest
+	}
+	return results
+}
+
+// Instance returns the ListUpdate of a particular instance in the index.
+func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
+	for _, manifest := range index.Manifests {
+		if manifest.Digest == instanceDigest {
+			return ListUpdate{
+				Digest:    manifest.Digest,
+				Size:      manifest.Size,
+				MediaType: manifest.MediaType,
+			}, nil
+		}
+	}
+	return ListUpdate{}, errors.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
+}
+
+// UpdateInstances updates the sizes, digests, and media types of the manifests
+// which the list catalogs.
+func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
+	if len(updates) != len(index.Manifests) {
+		return errors.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
+	}
+	for i := range updates {
+		if err := updates[i].Digest.Validate(); err != nil {
+			return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest", i+1, len(updates))
+		}
+		index.Manifests[i].Digest = updates[i].Digest
+		if updates[i].Size < 0 {
+			return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
+		}
+		index.Manifests[i].Size = updates[i].Size
+		if updates[i].MediaType == "" {
+			return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
+		}
+		if err := SupportedOCI1MediaType(updates[i].MediaType); err != nil && SupportedSchema2MediaType(updates[i].MediaType) != nil && updates[i].MediaType != imgspecv1.MediaTypeImageIndex {
+			return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances had an unsupported media type (was %q): %q", i+1, len(updates), index.Manifests[i].MediaType, updates[i].MediaType)
+		}
+		index.Manifests[i].MediaType = updates[i].MediaType
+	}
+	return nil
+}
+
+// ChooseInstance returns the digest of the manifest in the index which is
+// appropriate for the current environment.
+func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
+	wantedArch := runtime.GOARCH
+	if ctx != nil && ctx.ArchitectureChoice != "" {
+		wantedArch = ctx.ArchitectureChoice
+	}
+	wantedOS := runtime.GOOS
+	if ctx != nil && ctx.OSChoice != "" {
+		wantedOS = ctx.OSChoice
+	}
+
+	for _, d := range index.Manifests {
+		if d.Platform != nil && d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS {
+			return d.Digest, nil
+		}
+	}
+	for _, d := range index.Manifests {
+		if d.Platform == nil {
+			return d.Digest, nil
+		}
+	}
+	return "", fmt.Errorf("no image found in image index for architecture %s, OS %s", wantedArch, wantedOS)
+}
+
+// Serialize returns the index in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (index *OCI1Index) Serialize() ([]byte, error) {
+	buf, err := json.Marshal(index)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error marshaling OCI1Index %#v", index)
+	}
+	return buf, nil
+}
+
+// OCI1IndexFromComponents creates an OCI1 image index instance from the
+// supplied data.
+func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index {
+	index := OCI1Index{
+		imgspecv1.Index{
+			Versioned:   imgspec.Versioned{SchemaVersion: 2},
+			Manifests:   make([]imgspecv1.Descriptor, len(components)),
+			Annotations: dupStringStringMap(annotations),
+		},
+	}
+	for i, component := range components {
+		var platform *imgspecv1.Platform
+		if component.Platform != nil {
+			platform = &imgspecv1.Platform{
+				Architecture: component.Platform.Architecture,
+				OS:           component.Platform.OS,
+				OSVersion:    component.Platform.OSVersion,
+				OSFeatures:   dupStringSlice(component.Platform.OSFeatures),
+				Variant:      component.Platform.Variant,
+			}
+		}
+		m := imgspecv1.Descriptor{
+			MediaType:   component.MediaType,
+			Size:        component.Size,
+			Digest:      component.Digest,
+			URLs:        dupStringSlice(component.URLs),
+			Annotations: dupStringStringMap(component.Annotations),
+			Platform:    platform,
+		}
+		index.Manifests[i] = m
+	}
+	return &index
+}
+
+// OCI1IndexClone creates a deep copy of the passed-in index.
+func OCI1IndexClone(index *OCI1Index) *OCI1Index {
+	return OCI1IndexFromComponents(index.Manifests, index.Annotations)
+}
+
+// ToOCI1Index returns the index encoded as an OCI1 index.
+func (index *OCI1Index) ToOCI1Index() (*OCI1Index, error) {
+	return OCI1IndexClone(index), nil
+}
+
+// ToSchema2List returns the index encoded as a Schema2 list.
+func (index *OCI1Index) ToSchema2List() (*Schema2List, error) {
+	components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
+	for _, manifest := range index.Manifests {
+		platform := manifest.Platform
+		if platform == nil {
+			platform = &imgspecv1.Platform{
+				OS:           runtime.GOOS,
+				Architecture: runtime.GOARCH,
+			}
+		}
+		converted := Schema2ManifestDescriptor{
+			Schema2Descriptor{
+				MediaType: manifest.MediaType,
+				Size:      manifest.Size,
+				Digest:    manifest.Digest,
+				URLs:      dupStringSlice(manifest.URLs),
+			},
+			Schema2PlatformSpec{
+				OS:           platform.OS,
+				Architecture: platform.Architecture,
+				OSFeatures:   dupStringSlice(platform.OSFeatures),
+				OSVersion:    platform.OSVersion,
+				Variant:      platform.Variant,
+			},
+		}
+		components = append(components, converted)
+	}
+	s2 := Schema2ListFromComponents(components)
+	return s2, nil
+}
+
+// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
+// JSON, presumably generated by encoding an OCI1 manifest index.
+func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
+	index := OCI1Index{
+		Index: imgspecv1.Index{
+			Versioned:   imgspec.Versioned{SchemaVersion: 2},
+			Manifests:   []imgspecv1.Descriptor{},
+			Annotations: make(map[string]string),
+		},
+	}
+	if err := json.Unmarshal(manifest, &index); err != nil {
+		return nil, errors.Wrapf(err, "error unmarshaling OCI1Index %q", string(manifest))
+	}
+	return &index, nil
+}
+
+// Clone returns a deep copy of this list and its contents.
+func (index *OCI1Index) Clone() List {
+	return OCI1IndexClone(index)
+}
+
+// ConvertToMIMEType converts the passed-in image index to a manifest list of
+// the specified type.
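+//
+// E.g. converting an index to a schema2 manifest list; a sketch, where idx is
+// an illustrative *OCI1Index value:
+//
+//	asList, err := idx.ConvertToMIMEType(DockerV2ListMediaType)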
+func (index *OCI1Index) ConvertToMIMEType(manifestMIMEType string) (List, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return index.ToSchema2List() + case imgspecv1.MediaTypeImageIndex: + return index.Clone(), nil + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType) + } +} diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go new file mode 100644 index 000000000..164d5522d --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -0,0 +1,159 @@ +package archive + +import ( + "context" + "io" + "os" + + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/archive" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type ociArchiveImageDestination struct { + ref ociArchiveReference + unpackedDest types.ImageDestination + tempDirRef tempDirOCIRef +} + +// newImageDestination returns an ImageDestination for writing to an existing directory. +func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) { + tempDirRef, err := createOCIRef(ref.image) + if err != nil { + return nil, errors.Wrapf(err, "error creating oci reference") + } + unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys) + if err != nil { + if err := tempDirRef.deleteTempDir(); err != nil { + return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) + } + return nil, err + } + return &ociArchiveImageDestination{ref: ref, + unpackedDest: unpackedDest, + tempDirRef: tempDirRef}, nil +} + +// Reference returns the reference used to set up this destination. +func (d *ociArchiveImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any +// Close deletes the temp directory of the oci-archive image +func (d *ociArchiveImageDestination) Close() error { + defer d.tempDirRef.deleteTempDir() + return d.unpackedDest.Close() +} + +func (d *ociArchiveImageDestination) SupportedManifestMIMETypes() []string { + return d.unpackedDest.SupportedManifestMIMETypes() +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures +func (d *ociArchiveImageDestination) SupportsSignatures(ctx context.Context) error { + return d.unpackedDest.SupportsSignatures(ctx) +} + +func (d *ociArchiveImageDestination) DesiredLayerCompression() types.LayerCompression { + return d.unpackedDest.DesiredLayerCompression() +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool { + return d.unpackedDest.AcceptsForeignLayerURLs() +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. 
+func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool {
+	return d.unpackedDest.MustMatchRuntimeOS()
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return d.unpackedDest.IgnoresEmbeddedDockerReference()
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
+}
+
+// PutManifest writes the manifest to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+	return d.unpackedDest.PutManifest(ctx, m, instanceDigest)
+}
+
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+	return d.unpackedDest.PutSignatures(ctx, signatures, instanceDigest)
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// Once the directory is fully populated, it is tarred up into a file and the directory is deleted.
+func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+	if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil {
+		return errors.Wrapf(err, "error storing image %q", d.ref.image)
+	}
+
+	// path of directory to tar up
+	src := d.tempDirRef.tempDirectory
+	// path to save tarred up file
+	dst := d.ref.resolvedFile
+	return tarDirectory(src, dst)
+}
+
+// tarDirectory archives the directory at src and saves it to dst
+func tarDirectory(src, dst string) error {
+	// input is a stream of bytes from the archive of the directory at path
+	input, err := archive.Tar(src, archive.Uncompressed)
+	if err != nil {
+		return errors.Wrapf(err, "error retrieving stream of bytes from %q", src)
+	}
+
+	// creates the tar file
+	outFile, err := os.Create(dst)
+	if err != nil {
+		return errors.Wrapf(err, "error creating tar file %q", dst)
+	}
+	defer outFile.Close()
+
+	// copies the contents of the directory to the tar file
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+	_, err = io.Copy(outFile, input)
+
+	return err
+}
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
new file mode 100644
index 000000000..33a41d44b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
@@ -0,0 +1,109 @@
+package archive
+
+import (
+	"context"
+	"io"
+
+	ocilayout "github.com/containers/image/v5/oci/layout"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type ociArchiveImageSource struct {
+	ref         ociArchiveReference
+	unpackedSrc types.ImageSource
+	tempDirRef  tempDirOCIRef
+}
+
+// newImageSource returns an ImageSource for reading from an existing directory.
+// newImageSource untars the file and saves it in a temp directory
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) {
+	tempDirRef, err := createUntarTempDir(ref)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating temp directory")
+	}
+
+	unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys)
+	if err != nil {
+		if err := tempDirRef.deleteTempDir(); err != nil {
+			return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+		}
+		return nil, err
+	}
+	return &ociArchiveImageSource{ref: ref,
+		unpackedSrc: unpackedSrc,
+		tempDirRef:  tempDirRef}, nil
+}
+
+// LoadManifestDescriptor loads the manifest
+func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
+	ociArchRef, ok := imgRef.(ociArchiveReference)
+	if !ok {
+		return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference")
+	}
+	tempDirRef, err := createUntarTempDir(ociArchRef)
+	if err != nil {
+		return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory")
+	}
+	defer tempDirRef.deleteTempDir()
+
+	descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted)
+	if err != nil {
+		return imgspecv1.Descriptor{}, errors.Wrap(err, "error loading index")
+	}
+	return descriptor, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ociArchiveImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+// Close deletes the temporary directory the archive was untarred into
+func (s *ociArchiveImageSource) Close() error {
+	defer s.tempDirRef.deleteTempDir()
+	return s.unpackedSrc.Close()
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+	return s.unpackedSrc.GetManifest(ctx, instanceDigest)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool {
+	return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	return s.unpackedSrc.GetBlob(ctx, info, cache)
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	return s.unpackedSrc.GetSignatures(ctx, instanceDigest)
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+	return s.unpackedSrc.LayerInfosForCopy(ctx, instanceDigest)
+}
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
new file mode 100644
index 000000000..2d72a6fee
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
@@ -0,0 +1,192 @@
+package archive
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+
+	"github.com/containers/image/v5/directory/explicitfilepath"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/internal/tmpdir"
+	"github.com/containers/image/v5/oci/internal"
+	ocilayout "github.com/containers/image/v5/oci/layout"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for OCI archives.
+// It creates an oci-archive tar file by calling into the OCI transport,
+// tarring the directory created by oci and deleting the directory.
+var Transport = ociArchiveTransport{}
+
+type ociArchiveTransport struct{}
+
+// ociArchiveReference is an ImageReference for OCI Archive paths
+type ociArchiveReference struct {
+	file         string
+	resolvedFile string
+	image        string
+}
+
+func (t ociArchiveTransport) Name() string {
+	return "oci-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix,
+// into an ImageReference.
+func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+	return internal.ValidateScope(scope)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+	file, image := internal.SplitPathAndImage(reference)
+	return NewReference(file, image)
+}
+
+// NewReference returns an OCI reference for a file and an image.
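+//
+// Illustrative use (hypothetical path and tag, not part of the vendored file;
+// error handling elided):
+//
+//	ref, _ := NewReference("/tmp/busybox-oci.tar", "latest")
+//	_ = ref.StringWithinTransport() // "/tmp/busybox-oci.tar:latest"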
+func NewReference(file, image string) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) + if err != nil { + return nil, err + } + + if err := internal.ValidateOCIPath(file); err != nil { + return nil, err + } + + if err := internal.ValidateImageName(image); err != nil { + return nil, err + } + + return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil +} + +func (ref ociArchiveReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +func (ref ociArchiveReference) StringWithinTransport() string { + return fmt.Sprintf("%s:%s", ref.file, ref.image) +} + +// DockerReference returns a Docker reference associated with this reference +func (ref ociArchiveReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +func (ref ociArchiveReference) PolicyConfigurationIdentity() string { + // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the + // same image and the two can’t be statically disambiguated. Using at least the repository directory is + // less granular but hopefully still useful. + return fmt.Sprintf("%s", ref.resolvedFile) +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set +func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedFile + for { + lastSlash := strings.LastIndex(path, "/") + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by ociTransport.ValidatePolicyConfigurationScope above. + if lastSlash == -1 || path == "/" { + break + } + res = append(res, path) + path = path[:lastSlash] + } + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref ociArchiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. 
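+//
+// Callers normally do not drive this destination directly; they hand the
+// reference to the copy machinery. A rough sketch (hypothetical references,
+// policy context and error handling elided):
+//
+//	destRef, _ := ParseReference("/tmp/out.tar:latest")
+//	// copy.Image(ctx, policyContext, destRef, srcRef, nil) then writes srcRef
+//	// into the archive through this ImageDestination.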
+func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(ctx, sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for oci: images")
+}
+
+// tempDirOCIRef stores the ociReference and temporary directory returned by createOCIRef
+type tempDirOCIRef struct {
+	tempDirectory   string
+	ociRefExtracted types.ImageReference
+}
+
+// deleteTempDir deletes the temporary directory created
+func (t *tempDirOCIRef) deleteTempDir() error {
+	return os.RemoveAll(t.tempDirectory)
+}
+
+// createOCIRef creates the oci reference of the image
+func createOCIRef(image string) (tempDirOCIRef, error) {
+	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci")
+	if err != nil {
+		return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
+	}
+	ociRef, err := ocilayout.NewReference(dir, image)
+	if err != nil {
+		return tempDirOCIRef{}, err
+	}
+
+	tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef}
+	return tempDirRef, nil
+}
+
+// createUntarTempDir creates the temporary directory and copies the tarred content to it
+func createUntarTempDir(ref ociArchiveReference) (tempDirOCIRef, error) {
+	tempDirRef, err := createOCIRef(ref.image)
+	if err != nil {
+		return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference")
+	}
+	src := ref.resolvedFile
+	dst := tempDirRef.tempDirectory
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+	if err := archive.UntarPath(src, dst); err != nil {
+		if err := tempDirRef.deleteTempDir(); err != nil {
+			return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+		}
+		return tempDirOCIRef{}, errors.Wrapf(err, "error untarring file %q", src)
+	}
+	return tempDirRef, nil
+}
diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
new file mode 100644
index 000000000..c2012e50e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
@@ -0,0 +1,126 @@
+package internal
+
+import (
+	"github.com/pkg/errors"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strings"
+)
+
+// annotation specs from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+	separator = `(?:[-._:@+]|--)`
+	alphanum  = `(?:[A-Za-z0-9]+)`
+	component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
+
+// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
+// In any other case an error is returned.
+func ValidateImageName(image string) error {
+	if len(image) == 0 {
+		return nil
+	}
+
+	var err error
+	if !refRegexp.MatchString(image) {
+		err = errors.Errorf("Invalid image %s", image)
+	}
+	return err
+}
+
+// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
+// Neither path nor image parts are validated at this stage.
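+//
+// For example (illustrative):
+//
+//	SplitPathAndImage("/var/lib/oci:busybox")    // -> "/var/lib/oci", "busybox"
+//	SplitPathAndImage("/var/lib/oci")            // -> "/var/lib/oci", ""
+//	SplitPathAndImage(`C:\oci\dir:busybox`)      // on Windows -> `C:\oci\dir`, "busybox"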
+func SplitPathAndImage(reference string) (string, string) {
+	if runtime.GOOS == "windows" {
+		return splitPathAndImageWindows(reference)
+	}
+	return splitPathAndImageNonWindows(reference)
+}
+
+func splitPathAndImageWindows(reference string) (string, string) {
+	groups := windowsRefRegexp.FindStringSubmatch(reference)
+	// nil group means no match
+	if groups == nil {
+		return reference, ""
+	}
+
+	// we expect three elements: the full match, the capture group for the path,
+	// and the capture group for the image
+	if len(groups) != 3 {
+		return reference, ""
+	}
+	return groups[1], groups[2]
+}
+
+func splitPathAndImageNonWindows(reference string) (string, string) {
+	sep := strings.SplitN(reference, ":", 2)
+	path := sep[0]
+
+	var image string
+	if len(sep) == 2 {
+		image = sep[1]
+	}
+	return path, image
+}
+
+// ValidateOCIPath takes the OCI path and validates it.
+func ValidateOCIPath(path string) error {
+	if runtime.GOOS == "windows" {
+		// On Windows we must allow for a ':' as part of the path
+		if strings.Count(path, ":") > 1 {
+			return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
+		}
+	} else {
+		if strings.Contains(path, ":") {
+			return errors.Errorf("Invalid OCI reference: path %s contains a colon", path)
+		}
+	}
+	return nil
+}
+
+// ValidateScope validates a policy configuration scope for an OCI transport.
+func ValidateScope(scope string) error {
+	var err error
+	if runtime.GOOS == "windows" {
+		err = validateScopeWindows(scope)
+	} else {
+		err = validateScopeNonWindows(scope)
+	}
+	if err != nil {
+		return err
+	}
+
+	cleaned := filepath.Clean(scope)
+	if cleaned != scope {
+		return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+	}
+
+	return nil
+}
+
+func validateScopeWindows(scope string) error {
+	matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
+	if !matched {
+		return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
+	}
+
+	return nil
+}
+
+func validateScopeNonWindows(scope string) error {
+	if !strings.HasPrefix(scope, "/") {
+		return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
+	}
+
+	// Refuse also "/", otherwise "/" and "" would have the same semantics,
+	// and "" could be unexpectedly shadowed by the "/" entry.
+	if scope == "/" {
+		return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
new file mode 100644
index 000000000..370e8d2cd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -0,0 +1,342 @@
+package layout
+
+import (
+	"context"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+	imgspec "github.com/opencontainers/image-spec/specs-go"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type ociImageDestination struct {
+	ref                      ociReference
+	index                    imgspecv1.Index
+	sharedBlobDir            string
+	acceptUncompressedLayers bool
+}
+
+// newImageDestination returns an ImageDestination for writing to an existing directory.
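+//
+// After a successful copy, the directory follows the OCI image layout; an
+// illustrative sketch of the on-disk result (not upstream text):
+//
+//	dir/
+//	  oci-layout            written by Commit: {"imageLayoutVersion": "1.0.0"}
+//	  index.json            descriptors for the stored manifests
+//	  blobs/sha256/<hex>    config, layer and manifest blobs, named by digest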
+func newImageDestination(sys *types.SystemContext, ref ociReference) (types.ImageDestination, error) { + var index *imgspecv1.Index + if indexExists(ref) { + var err error + index, err = ref.getIndex() + if err != nil { + return nil, err + } + } else { + index = &imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + Annotations: make(map[string]string), + } + } + + d := &ociImageDestination{ref: ref, index: *index} + if sys != nil { + d.sharedBlobDir = sys.OCISharedBlobDirPath + d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers + } + + if err := ensureDirectoryExists(d.ref.dir); err != nil { + return nil, err + } + // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, + // but it MAY be empty (e.g. if we never end up calling PutBlob) + // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { + return nil, err + } + return d, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *ociImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *ociImageDestination) Close() error { + return nil +} + +func (d *ociImageDestination) SupportedManifestMIMETypes() []string { + return []string{ + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + } +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error { + return errors.Errorf("Pushing signatures for OCI images is not supported") +} + +func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression { + if d.acceptUncompressedLayers { + return types.PreserveOriginal + } + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *ociImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. 
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded := false
+	explicitClosed := false
+	defer func() {
+		if !explicitClosed {
+			blobFile.Close()
+		}
+		if !succeeded {
+			os.Remove(blobFile.Name())
+		}
+	}()
+
+	digester := digest.Canonical.Digester()
+	tee := io.TeeReader(stream, digester.Hash())
+
+	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+	size, err := io.Copy(blobFile, tee)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	computedDigest := digester.Digest()
+	if inputInfo.Size != -1 && size != inputInfo.Size {
+		return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+	}
+	if err := blobFile.Sync(); err != nil {
+		return types.BlobInfo{}, err
+	}
+
+	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
+	// On Windows, the “permissions of newly created files” argument to syscall.Open is
+	// ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
+	// always fails on Windows.
+	if runtime.GOOS != "windows" {
+		if err := blobFile.Chmod(0644); err != nil {
+			return types.BlobInfo{}, err
+		}
+	}
+
+	blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if err := ensureParentDirectoryExists(blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+
+	// need to explicitly close the file, since otherwise the rename won't work on Windows
+	blobFile.Close()
+	explicitClosed = true
+	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded = true
+	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
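+//
+// Rough calling sketch (hypothetical digest value; normally the copy pipeline,
+// not end users, drives this):
+//
+//	ok, reused, err := d.TryReusingBlob(ctx, types.BlobInfo{Digest: dgst}, cache, false)
+//	if err == nil && ok {
+//		// reused.Size is filled in; the blob already exists under blobs/<alg>/<hex>,
+//		// so PutBlob can be skipped
+//	}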
+func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+	}
+	blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, types.BlobInfo{}, nil
+	}
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,
+// this should be either an OCI manifest (possibly converted to this format by the caller) or index,
+// neither of which we'll need to modify further.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+// and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+	var digest digest.Digest
+	var err error
+	if instanceDigest != nil {
+		digest = *instanceDigest
+	} else {
+		digest, err = manifest.Digest(m)
+		if err != nil {
+			return err
+		}
+	}
+
+	blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir)
+	if err != nil {
+		return err
+	}
+	if err := ensureParentDirectoryExists(blobPath); err != nil {
+		return err
+	}
+	if err := ioutil.WriteFile(blobPath, m, 0644); err != nil {
+		return err
+	}
+
+	if instanceDigest != nil {
+		return nil
+	}
+
+	// If we had platform information, we'd build an imgspecv1.Platform structure here.
+
+	// Start filling out the descriptor for this entry
+	desc := imgspecv1.Descriptor{}
+	desc.Digest = digest
+	desc.Size = int64(len(m))
+	if d.ref.image != "" {
+		desc.Annotations = make(map[string]string)
+		desc.Annotations[imgspecv1.AnnotationRefName] = d.ref.image
+	}
+
+	// If we knew the MIME type, we wouldn't have to guess here.
+	desc.MediaType = manifest.GuessMIMEType(m)
+
+	d.addManifest(&desc)
+
+	return nil
+}
+
+func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
+	// If the new entry has a name, remove any conflicting names which we already have.
+	if desc.Annotations != nil && desc.Annotations[imgspecv1.AnnotationRefName] != "" {
+		// The name is being set on a new entry, so remove any older ones that had the same name.
+		// We might be storing an index and all of its component images, and we'll want to attach
+		// the name to the last one, which is the index.
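+		// For example (illustrative): when storing a manifest list plus its
+		// per-platform manifests under the same name, several PutManifest calls
+		// arrive here; only the final entry (the list itself) should keep the
+		// "org.opencontainers.image.ref.name" annotation, so any older entry
+		// with the same name is stripped of it below.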
+ for i, manifest := range d.index.Manifests { + if manifest.Annotations[imgspecv1.AnnotationRefName] == desc.Annotations[imgspecv1.AnnotationRefName] { + delete(d.index.Manifests[i].Annotations, imgspecv1.AnnotationRefName) + break + } + } + } + // If it has the same digest as another entry in the index, we already overwrote the file, + // so just pick up the other information. + for i, manifest := range d.index.Manifests { + if manifest.Digest == desc.Digest { + // Replace it completely. + d.index.Manifests[i] = *desc + return + } + } + // It's a new entry to be added to the index. + d.index.Manifests = append(d.index.Manifests, *desc) +} + +// PutSignatures would add the given signatures to the oci layout (currently not supported). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + if len(signatures) != 0 { + return errors.Errorf("Pushing signatures for OCI images is not supported") + } + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error { + if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { + return err + } + indexJSON, err := json.Marshal(d.index) + if err != nil { + return err + } + return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644) +} + +func ensureDirectoryExists(path string) error { + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } + return nil +} + +// ensureParentDirectoryExists ensures the parent of the supplied path exists. +func ensureParentDirectoryExists(path string) error { + return ensureDirectoryExists(filepath.Dir(path)) +} + +// indexExists checks whether the index location specified in the OCI reference exists. 
+// The implementation is opinionated, since in case of unexpected errors true is returned
+func indexExists(ref ociReference) bool {
+	_, err := os.Stat(ref.indexPath())
+	if err == nil {
+		return true
+	}
+	if os.IsNotExist(err) {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
new file mode 100644
index 000000000..f515203df
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -0,0 +1,189 @@
+package layout
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strconv"
+
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/tlsclientconfig"
+	"github.com/containers/image/v5/types"
+	"github.com/docker/go-connections/tlsconfig"
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type ociImageSource struct {
+	ref           ociReference
+	index         *imgspecv1.Index
+	descriptor    imgspecv1.Descriptor
+	client        *http.Client
+	sharedBlobDir string
+}
+
+// newImageSource returns an ImageSource for reading from an existing directory.
+func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSource, error) {
+	tr := tlsclientconfig.NewTransport()
+	tr.TLSClientConfig = tlsconfig.ServerDefault()
+
+	if sys != nil && sys.OCICertPath != "" {
+		if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil {
+			return nil, err
+		}
+		tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify
+	}
+
+	client := &http.Client{}
+	client.Transport = tr
+	descriptor, err := ref.getManifestDescriptor()
+	if err != nil {
+		return nil, err
+	}
+	index, err := ref.getIndex()
+	if err != nil {
+		return nil, err
+	}
+	d := &ociImageSource{ref: ref, index: index, descriptor: descriptor, client: client}
+	if sys != nil {
+		// TODO(jonboulle): check dir existence?
+		d.sharedBlobDir = sys.OCISharedBlobDirPath
+	}
+	return d, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ociImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *ociImageSource) Close() error {
+	return nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
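+//
+// Usage sketch (illustrative; error handling elided):
+//
+//	m, mimeType, _ := src.GetManifest(ctx, nil) // the primary manifest
+//	// for a single entry of a manifest list, pass its digest instead:
+//	// m, mimeType, _ = src.GetManifest(ctx, &instanceDigest)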
+func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + var dig digest.Digest + var mimeType string + var err error + + if instanceDigest == nil { + dig = digest.Digest(s.descriptor.Digest) + mimeType = s.descriptor.MediaType + } else { + dig = *instanceDigest + for _, md := range s.index.Manifests { + if md.Digest == dig { + mimeType = md.MediaType + break + } + } + } + + manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) + if err != nil { + return nil, "", err + } + + m, err := ioutil.ReadFile(manifestPath) + if err != nil { + return nil, "", err + } + if mimeType == "" { + mimeType = manifest.GuessMIMEType(m) + } + + return m, mimeType, nil +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ociImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if len(info.URLs) != 0 { + return s.getExternalBlob(ctx, info.URLs) + } + + path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) + if err != nil { + return nil, 0, err + } + + r, err := os.Open(path) + if err != nil { + return nil, 0, err + } + fi, err := r.Stat() + if err != nil { + return nil, 0, err + } + return r, fi.Size(), nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return [][]byte{}, nil +} + +func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + errWrap := errors.New("failed fetching external blob from all urls") + for _, url := range urls { + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error()) + continue + } + + resp, err := s.client.Do(req.WithContext(ctx)) + if err != nil { + errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error()) + continue + } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url) + continue + } + + return resp.Body, getBlobSize(resp), nil + } + + return nil, 0, errWrap +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). 
+// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *ociImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go new file mode 100644 index 000000000..c662c9a7a --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -0,0 +1,264 @@ +package layout + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/oci/internal" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +var ( + // Transport is an ImageTransport for OCI directories. + Transport = ociTransport{} + + // ErrMoreThanOneImage is an error returned when the manifest includes + // more than one image and the user should choose which one to use. + ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image") +) + +type ociTransport struct{} + +func (t ociTransport) Name() string { + return "oci" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { + return internal.ValidateScope(scope) +} + +// ociReference is an ImageReference for OCI directory paths. +type ociReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + dir string // As specified by the user. May be relative, contain symlinks, etc. + resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. 
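+	// For example (illustrative): dir might be the user-supplied relative path
+	// "./layout", while resolvedDir would be its fully resolved form, such as
+	// "/home/user/project/layout".
+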
+ // If image=="", it means the "only image" in the index.json is used in the case it is a source + // for destinations, the image name annotation "image.ref.name" is not added to the index.json + image string +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. +func ParseReference(reference string) (types.ImageReference, error) { + dir, image := internal.SplitPathAndImage(reference) + return NewReference(dir, image) +} + +// NewReference returns an OCI reference for a directory and a image. +// +// We do not expose an API supplying the resolvedDir; we could, but recomputing it +// is generally cheap enough that we prefer being confident about the properties of resolvedDir. +func NewReference(dir, image string) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir) + if err != nil { + return nil, err + } + + if err := internal.ValidateOCIPath(dir); err != nil { + return nil, err + } + + if err = internal.ValidateImageName(image); err != nil { + return nil, err + } + + return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil +} + +func (ref ociReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref ociReference) StringWithinTransport() string { + return fmt.Sprintf("%s:%s", ref.dir, ref.image) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref ociReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref ociReference) PolicyConfigurationIdentity() string { + // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the + // same image and the two can’t be statically disambiguated. Using at least the repository directory is + // less granular but hopefully still useful. + return fmt.Sprintf("%s", ref.resolvedDir) +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. 
The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ociReference) PolicyConfigurationNamespaces() []string {
+	res := []string{}
+	path := ref.resolvedDir
+	for {
+		lastSlash := strings.LastIndex(path, "/")
+		// Note that we do not include "/"; it is redundant with the default "" global default,
+		// and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+		if lastSlash == -1 || path == "/" {
+			break
+		}
+		res = append(res, path)
+		path = path[:lastSlash]
+	}
+	return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	src, err := newImageSource(sys, ref)
+	if err != nil {
+		return nil, err
+	}
+	return image.FromSource(ctx, sys, src)
+}
+
+// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening the index,
+// nil is returned together with the error.
+func (ref ociReference) getIndex() (*imgspecv1.Index, error) {
+	indexJSON, err := os.Open(ref.indexPath())
+	if err != nil {
+		return nil, err
+	}
+	defer indexJSON.Close()
+
+	index := &imgspecv1.Index{}
+	if err := json.NewDecoder(indexJSON).Decode(index); err != nil {
+		return nil, err
+	}
+	return index, nil
+}
+
+func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
+	index, err := ref.getIndex()
+	if err != nil {
+		return imgspecv1.Descriptor{}, err
+	}
+
+	var d *imgspecv1.Descriptor
+	if ref.image == "" {
+		// return manifest if only one image is in the oci directory
+		if len(index.Manifests) == 1 {
+			d = &index.Manifests[0]
+		} else {
+			// ask user to choose image when more than one image in the oci directory
+			return imgspecv1.Descriptor{}, ErrMoreThanOneImage
+		}
+	} else {
+		// if image specified, look through all manifests for a match
+		for _, md := range index.Manifests {
+			if md.MediaType != imgspecv1.MediaTypeImageManifest && md.MediaType != imgspecv1.MediaTypeImageIndex {
+				continue
+			}
+			refName, ok := md.Annotations[imgspecv1.AnnotationRefName]
+			if !ok {
+				continue
+			}
+			if refName == ref.image {
+				d = &md
+				break
+			}
+		}
+	}
+	if d == nil {
+		return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image)
+	}
+	return *d, nil
+}
+
+// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
+// when pulling an image
+func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
+	ociRef, ok := imgRef.(ociReference)
+	if !ok {
+		return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef")
+	}
+	return ociRef.getManifestDescriptor()
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
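+//
+// Typical use (illustrative; hypothetical directory, error handling elided):
+//
+//	ref, _ := ParseReference("/var/lib/oci-layout:busybox")
+//	src, _ := ref.NewImageSource(ctx, nil)
+//	defer src.Close()
+//	manifestBytes, mimeType, _ := src.GetManifest(ctx, nil)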
+func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for oci: images")
+}
+
+// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions.
+func (ref ociReference) ociLayoutPath() string {
+	return filepath.Join(ref.dir, "oci-layout")
+}
+
+// indexPath returns a path for the index.json within a directory using OCI conventions.
+func (ref ociReference) indexPath() string {
+	return filepath.Join(ref.dir, "index.json")
+}
+
+// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
+func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) {
+	if err := digest.Validate(); err != nil {
+		return "", errors.Wrapf(err, "unexpected digest reference %s", digest)
+	}
+	blobDir := filepath.Join(ref.dir, "blobs")
+	if sharedBlobDir != "" {
+		blobDir = sharedBlobDir
+	}
+	return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
+}
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
new file mode 100644
index 000000000..f45dc24c4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
@@ -0,0 +1,1170 @@
+package openshift
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/ghodss/yaml"
+	"github.com/imdario/mergo"
+	"github.com/pkg/errors"
+	"golang.org/x/net/http2"
+	"k8s.io/client-go/util/homedir"
+)
+
+// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig.
+// restTLSClientConfig contains settings to enable transport layer security
+type restTLSClientConfig struct {
+	// Server requires TLS client certificate authentication
+	CertFile string
+	// Server requires TLS client certificate authentication
+	KeyFile string
+	// Trusted root certificates for server
+	CAFile string
+
+	// CertData holds PEM-encoded bytes (typically read from a client certificate file).
+	// CertData takes precedence over CertFile
+	CertData []byte
+	// KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
+	// KeyData takes precedence over KeyFile
+	KeyData []byte
+	// CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
+	// CAData takes precedence over CAFile
+	CAData []byte
+}
+
+// restConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.Config.
+// Config holds the common attributes that can be passed to a Kubernetes client on
+// initialization.
+type restConfig struct {
+	// Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
+ // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + + // Server requires Basic authentication + Username string + Password string + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string + + // TLSClientConfig contains settings to enable transport layer security + restTLSClientConfig + + // Server should be accessed without verifying the TLS + // certificate. For testing only. + Insecure bool +} + +// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig. +// ClientConfig is used to make it easy to get an api server client +type clientConfig interface { + // ClientConfig returns a complete client config + ClientConfig() (*restConfig, error) +} + +// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig. +func defaultClientConfig() clientConfig { + loadingRules := newOpenShiftClientConfigLoadingRules() + // REMOVED: Allowing command-line overriding of loadingRules + // REMOVED: clientcmd.ConfigOverrides + + clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules) + + return clientConfig +} + +var recommendedHomeFile = path.Join(homedir.HomeDir(), ".kube/config") + +// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules. +// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift. +// 1. --config value +// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file +func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules { + chain := []string{} + + envVarFile := os.Getenv("KUBECONFIG") + if len(envVarFile) != 0 { + chain = append(chain, filepath.SplitList(envVarFile)...) + } else { + chain = append(chain, recommendedHomeFile) + } + + return &clientConfigLoadingRules{ + Precedence: chain, + // REMOVED: Migration support; run (oc login) to trigger migration + } +} + +// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig. +// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules +// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that +// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before +// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid +// passing extraneous information down a call stack +type deferredLoadingClientConfig struct { + loadingRules *clientConfigLoadingRules + + clientConfig clientConfig +} + +// NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig. 
+// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed loading rules
+func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig {
+	return &deferredLoadingClientConfig{loadingRules: loadingRules}
+}
+
+func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) {
+	if config.clientConfig == nil {
+		// REMOVED: Support for concurrent use in multiple threads.
+		mergedConfig, err := config.loadingRules.Load()
+		if err != nil {
+			return nil, err
+		}
+
+		var mergedClientConfig clientConfig
+		// REMOVED: Interactive fallback support.
+		mergedClientConfig = newNonInteractiveClientConfig(*mergedConfig)
+
+		config.clientConfig = mergedClientConfig
+	}
+
+	return config.clientConfig, nil
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) {
+	mergedClientConfig, err := config.createClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	mergedConfig, err := mergedClientConfig.ClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	// REMOVED: In-cluster service account configuration use.
+
+	return mergedConfig, nil
+}
+
+var (
+	// DefaultCluster is the cluster config used when no other config is specified
+	// TODO: eventually apiserver should start on 443 and be secure by default
+	defaultCluster = clientcmdCluster{Server: "http://localhost:8080"}
+
+	// EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name
+	envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")}
+)
+
+// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.
+// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
+type directClientConfig struct {
+	config clientcmdConfig
+}
+
+// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig.
+// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
+func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig {
+	return &directClientConfig{config}
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *directClientConfig) ClientConfig() (*restConfig, error) {
+	if err := config.ConfirmUsable(); err != nil {
+		return nil, err
+	}
+
+	configAuthInfo := config.getAuthInfo()
+	configClusterInfo := config.getCluster()
+
+	clientConfig := &restConfig{}
+	clientConfig.Host = configClusterInfo.Server
+	if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
+		u.RawQuery = ""
+		u.Fragment = ""
+		clientConfig.Host = u.String()
+	}
+
+	// only try to read the auth information if we are secure
+	if isConfigTransportTLS(*clientConfig) {
+		var err error
+		// REMOVED: Support for interactive fallback.
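+		// Note (illustrative summary, not upstream text): user identification
+		// (token, client certificate, basic auth) is merged into clientConfig
+		// first, then server identification (CA data, insecure flag); each step
+		// uses mergo.MergeWithOverwrite, so non-empty values from the partial
+		// configs win over what is already set.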
+		userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
+		if err != nil {
+			return nil, err
+		}
+		mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig)
+
+		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
+		if err != nil {
+			return nil, err
+		}
+		mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig)
+	}
+
+	return clientConfig, nil
+}
+
+// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig.
+// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for the server identification:
+// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. load the ~/.kubernetes_auth file as a default
+func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
+	mergedConfig := &restConfig{}
+
+	// configClusterInfo holds the information identifying the server, provided by .kubeconfig
+	configClientConfig := &restConfig{}
+	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
+	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+	mergo.MergeWithOverwrite(mergedConfig, configClientConfig)
+
+	return mergedConfig, nil
+}
+
+// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
+// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for user identification:
+// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
+// 4. if there is not enough information to identify the user, prompt if possible
+func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
+	mergedConfig := &restConfig{}
+
+	// blindly overwrite existing values based on precedence
+	if len(configAuthInfo.Token) > 0 {
+		mergedConfig.BearerToken = configAuthInfo.Token
+	}
+	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+		mergedConfig.CertFile = configAuthInfo.ClientCertificate
+		mergedConfig.CertData = configAuthInfo.ClientCertificateData
+		mergedConfig.KeyFile = configAuthInfo.ClientKey
+		mergedConfig.KeyData = configAuthInfo.ClientKeyData
+	}
+	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+		mergedConfig.Username = configAuthInfo.Username
+		mergedConfig.Password = configAuthInfo.Password
+	}
+
+	// REMOVED: prompting for missing information.
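+	// For example (illustrative): a kubeconfig user that defines only a token
+	// yields a mergedConfig with just BearerToken set; the client-certificate
+	// and basic-auth fields remain empty.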
+	return mergedConfig, nil
+}
+
+// canIdentifyUser is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.canIdentifyUser
+func canIdentifyUser(config restConfig) bool {
+	return len(config.Username) > 0 ||
+		(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
+		len(config.BearerToken) > 0
+}
+
+// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
+// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
+func (config *directClientConfig) ConfirmUsable() error {
+	var validationErrors []error
+	validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
+	validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
+	// when direct client config is specified, and our only error is that no server is defined, we should
+	// return a standard "no config" error
+	if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster {
+		return newErrConfigurationInvalid([]error{errEmptyConfig})
+	}
+	return newErrConfigurationInvalid(validationErrors)
+}
+
+// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName.
+func (config *directClientConfig) getContextName() string {
+	// REMOVED: overrides support
+	return config.config.CurrentContext
+}
+
+// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName.
+func (config *directClientConfig) getAuthInfoName() string {
+	// REMOVED: overrides support
+	return config.getContext().AuthInfo
+}
+
+// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName.
+func (config *directClientConfig) getClusterName() string {
+	// REMOVED: overrides support
+	return config.getContext().Cluster
+}
+
+// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext.
+func (config *directClientConfig) getContext() clientcmdContext {
+	contexts := config.config.Contexts
+	contextName := config.getContextName()
+
+	var mergedContext clientcmdContext
+	if configContext, exists := contexts[contextName]; exists {
+		mergo.MergeWithOverwrite(&mergedContext, configContext)
+	}
+	// REMOVED: overrides support
+
+	return mergedContext
+}
+
+var (
+	errEmptyConfig = errors.New("no configuration has been provided")
+	// message is for consistency with old behavior
+	errEmptyCluster = errors.New("cluster has no server defined")
+)
+
+// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo.
+// validateClusterInfo looks for conflicts and errors in the cluster info
+func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error {
+	var validationErrors []error
+
+	if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) {
+		return []error{errEmptyCluster}
+	}
+
+	if len(clusterInfo.Server) == 0 {
+		if len(clusterName) == 0 {
+			validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined"))
+		} else {
+			validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName))
+		}
+	}
+	// Make sure CA data and CA file aren't both specified
+	if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
+		validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
+	}
+	if len(clusterInfo.CertificateAuthority) != 0 {
+		clientCertCA, err := os.Open(clusterInfo.CertificateAuthority)
+		if err != nil {
+			validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
+		} else {
+			defer clientCertCA.Close()
+		}
+	}
+
+	return validationErrors
+}
+
+// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo.
+// validateAuthInfo looks for conflicts and errors in the auth info
+func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
+	var validationErrors []error
+
+	usingAuthPath := false
+	methods := make([]string, 0, 3)
+	if len(authInfo.Token) != 0 {
+		methods = append(methods, "token")
+	}
+	if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 {
+		methods = append(methods, "basicAuth")
+	}
+
+	if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
+		// Make sure cert data and file aren't both specified
+		if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
+			validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
+		}
+		// Make sure key data and file aren't both specified
+		if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
+			validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
+		}
+		// Make sure a key is specified
+		if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
+			validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
+		}
+
+		if len(authInfo.ClientCertificate) != 0 {
+			clientCertFile, err := os.Open(authInfo.ClientCertificate)
+			if err != nil {
+				validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+			} else {
+				defer clientCertFile.Close()
+			}
+		}
+		if len(authInfo.ClientKey) != 0 {
+			clientKeyFile, err := os.Open(authInfo.ClientKey)
+			if err != nil {
+				validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+			} else {
+				defer clientKeyFile.Close()
+			}
+		}
+	}
+
+	// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
+	if (len(methods) > 1) && (!usingAuthPath) {
+		validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
+	}
+
+	return validationErrors
+}
+
+// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo.
+func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo {
+	authInfos := config.config.AuthInfos
+	authInfoName := config.getAuthInfoName()
+
+	var mergedAuthInfo clientcmdAuthInfo
+	if configAuthInfo, exists := authInfos[authInfoName]; exists {
+		mergo.MergeWithOverwrite(&mergedAuthInfo, configAuthInfo)
+	}
+	// REMOVED: overrides support
+
+	return mergedAuthInfo
+}
+
+// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster.
+func (config *directClientConfig) getCluster() clientcmdCluster {
+	clusterInfos := config.config.Clusters
+	clusterInfoName := config.getClusterName()
+
+	var mergedClusterInfo clientcmdCluster
+	mergo.MergeWithOverwrite(&mergedClusterInfo, defaultCluster)
+	mergo.MergeWithOverwrite(&mergedClusterInfo, envVarCluster)
+	if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
+		mergo.MergeWithOverwrite(&mergedClusterInfo, configClusterInfo)
+	}
+	// REMOVED: overrides support
+
+	return mergedClusterInfo
+}
+
+// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
+// This helper implements the error and Errors interfaces. Keeping it private
+// prevents people from making an aggregate of 0 errors, which is not
+// an error, but does satisfy the error interface.
+type aggregateErr []error
+
+// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
+// NewAggregate converts a slice of errors into an Aggregate interface, which
+// is itself an implementation of the error interface. If the slice is empty,
+// this returns nil.
+// It checks whether any element of the input error list is nil, to avoid a
+// nil pointer panic when calling Error().
+func newAggregate(errlist []error) error {
+	if len(errlist) == 0 {
+		return nil
+	}
+	// Filter out nil errors, in case the input list contains any
+	var errs []error
+	for _, e := range errlist {
+		if e != nil {
+			errs = append(errs, e)
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return aggregateErr(errs)
+}
+
+// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
+// Error is part of the error interface.
+func (agg aggregateErr) Error() string {
+	if len(agg) == 0 {
+		// This should never happen, really.
+		return ""
+	}
+	if len(agg) == 1 {
+		return agg[0].Error()
+	}
+	result := fmt.Sprintf("[%s", agg[0].Error())
+	for i := 1; i < len(agg); i++ {
+		result += fmt.Sprintf(", %s", agg[i].Error())
+	}
+	result += "]"
+	return result
+}
+
+// REMOVED: aggregateErr.Errors
+
+// errConfigurationInvalid is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
+// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
+type errConfigurationInvalid []error
+
+var _ error = errConfigurationInvalid{}
+
+// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid.
+
+// newErrConfigurationInvalid is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid.
+func newErrConfigurationInvalid(errs []error) error {
+	switch len(errs) {
+	case 0:
+		return nil
+	default:
+		return errConfigurationInvalid(errs)
+	}
+}
+
+// Error implements the error interface
+func (e errConfigurationInvalid) Error() string {
+	return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
+}
+
+// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
+// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files) OR the HomeDirectoryPath
+// ExplicitPath is special, because if a user specifically requests a certain file to be used, an error is reported if this file is not present
+type clientConfigLoadingRules struct {
+	Precedence []string
+}
+
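The map-versus-struct merge behavior documented in the Load comment below is subtle. As a toy illustration of the intended outcome (standard library only, not the vendored code, which delegates the mechanics to mergo.MergeWithOverwrite):

    package main

    import "fmt"

    // toyConfig stands in for clientcmdConfig: map-typed fields keep the first
    // value written, plain struct fields keep the last one.
    type toyConfig struct {
        Clusters       map[string]string
        CurrentContext string
    }

    func mergeToy(configs []toyConfig) toyConfig {
        out := toyConfig{Clusters: map[string]string{}}
        for _, c := range configs {
            for k, v := range c.Clusters {
                if _, exists := out.Clusters[k]; !exists { // first file wins for map keys
                    out.Clusters[k] = v
                }
            }
            if c.CurrentContext != "" { // last file wins for struct values
                out.CurrentContext = c.CurrentContext
            }
        }
        return out
    }

    func main() {
        merged := mergeToy([]toyConfig{
            {Clusters: map[string]string{"red": "https://a.example.com"}, CurrentContext: "red"},
            {Clusters: map[string]string{"red": "https://b.example.com"}, CurrentContext: "blue"},
        })
        fmt.Println(merged.Clusters["red"], merged.CurrentContext) // https://a.example.com blue
    }
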
+// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
+// Load starts by running the MigrationRules and then
+// takes the loading rules and returns a Config object based on the following rules.
+// If the ExplicitPath is set, return the unmerged explicit file.
+// Otherwise, return a merged config based on the Precedence slice.
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins and the map key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside of a map, the value WILL be changed.
+// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
+// non-conflicting entries from the second file's "red-user" are discarded.
+// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
+// and only absolute file paths are returned.
+func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
+	errlist := []error{}
+
+	kubeConfigFiles := []string{}
+
+	// REMOVED: explicit path support
+	kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
+
+	kubeconfigs := []*clientcmdConfig{}
+	// read and cache the config files so that we only look at them once
+	for _, filename := range kubeConfigFiles {
+		if len(filename) == 0 {
+			// no work to do
+			continue
+		}
+
+		config, err := loadFromFile(filename)
+		if os.IsNotExist(err) {
+			// skip missing files
+			continue
+		}
+		if err != nil {
+			errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename))
+			continue
+		}
+
+		kubeconfigs = append(kubeconfigs, config)
+	}
+
+	// first merge all of our maps
+	mapConfig := clientcmdNewConfig()
+	for _, kubeconfig := range kubeconfigs {
+		mergo.MergeWithOverwrite(mapConfig, kubeconfig)
+	}
+
+	// merge all of the struct values in the reverse order so that priority is given correctly
+	// errors are not added to the list the second time
+	nonMapConfig := clientcmdNewConfig()
+	for i := len(kubeconfigs) - 1; i >= 0; i-- {
+		kubeconfig := kubeconfigs[i]
+		mergo.MergeWithOverwrite(nonMapConfig, kubeconfig)
+	}
+
+	// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
+	// get the values we expect.
+	config := clientcmdNewConfig()
+	mergo.MergeWithOverwrite(config, mapConfig)
+	mergo.MergeWithOverwrite(config, nonMapConfig)
+
+	// REMOVED: Possibility to skip this.
+	if err := resolveLocalPaths(config); err != nil {
+		errlist = append(errlist, err)
+	}
+
+	return config, newAggregate(errlist)
+}
+
+// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
+// LoadFromFile takes a filename and deserializes the contents into a Config object
+func loadFromFile(filename string) (*clientcmdConfig, error) {
+	kubeconfigBytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	config, err := load(kubeconfigBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	// set LocationOfOrigin on every Cluster, User, and Context
+	for key, obj := range config.AuthInfos {
+		obj.LocationOfOrigin = filename
+		config.AuthInfos[key] = obj
+	}
+	for key, obj := range config.Clusters {
+		obj.LocationOfOrigin = filename
+		config.Clusters[key] = obj
+	}
+	for key, obj := range config.Contexts {
+		obj.LocationOfOrigin = filename
+		config.Contexts[key] = obj
+	}
+
+	if config.AuthInfos == nil {
+		config.AuthInfos = map[string]*clientcmdAuthInfo{}
+	}
+	if config.Clusters == nil {
+		config.Clusters = map[string]*clientcmdCluster{}
+	}
+	if config.Contexts == nil {
+		config.Contexts = map[string]*clientcmdContext{}
+	}
+
+	return config, nil
+}
+
+// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load
+// Load takes a byte slice and deserializes the contents into a Config object.
+// Encapsulates deserialization without assuming the source is a file.
+func load(data []byte) (*clientcmdConfig, error) {
+	config := clientcmdNewConfig()
+	// if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
+	if len(data) == 0 {
+		return config, nil
+	}
+	// Note: This does absolutely no kind/version checking or conversions.
+	data, err := yaml.YAMLToJSON(data)
+	if err != nil {
+		return nil, err
+	}
+	if err := json.Unmarshal(data, config); err != nil {
+		return nil, err
+	}
+	return config, nil
+}
+
+// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths.
+// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin.
+// This cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without
+// modification of its contents.
+func resolveLocalPaths(config *clientcmdConfig) error {
+	for _, cluster := range config.Clusters {
+		if len(cluster.LocationOfOrigin) == 0 {
+			continue
+		}
+		base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
+		if err != nil {
+			return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
+		}
+
+		if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
+			return err
+		}
+	}
+	for _, authInfo := range config.AuthInfos {
+		if len(authInfo.LocationOfOrigin) == 0 {
+			continue
+		}
+		base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
+		if err != nil {
+			return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
+		}
+
+		if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences.
+func getClusterFileReferences(cluster *clientcmdCluster) []*string {
+	return []*string{&cluster.CertificateAuthority}
+}
+
+// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences.
+func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string {
+	return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey}
+}
+
+// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths.
+// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
+func resolvePaths(refs []*string, base string) error {
+	for _, ref := range refs {
+		// Don't resolve empty paths
+		if len(*ref) > 0 {
+			// Don't resolve absolute paths
+			if !filepath.IsAbs(*ref) {
+				*ref = filepath.Join(base, *ref)
+			}
+		}
+	}
+	return nil
+}
+
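resolvePaths above is self-contained enough to exercise directly; a minimal sketch with hypothetical paths, showing that only non-empty, relative references are rewritten:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    // Standalone copy of resolvePaths: relative kubeconfig file references are
    // anchored at the directory containing the kubeconfig itself.
    func resolvePaths(refs []*string, base string) error {
        for _, ref := range refs {
            if len(*ref) > 0 && !filepath.IsAbs(*ref) {
                *ref = filepath.Join(base, *ref)
            }
        }
        return nil
    }

    func main() {
        ca := "certs/ca.pem"         // relative: resolved against base
        key := "/etc/pki/client.key" // absolute: left alone
        _ = resolvePaths([]*string{&ca, &key}, "/home/user/.kube")
        fmt.Println(ca)  // /home/user/.kube/certs/ca.pem
        fmt.Println(key) // /etc/pki/client.key
    }
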
+// restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor.
+// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
+// object. Note that a RESTClient may require fields that are optional when initializing a Client.
+// A RESTClient created by this method is generic - it expects to operate on an API that follows
+// the Kubernetes conventions, but may not be the Kubernetes API.
+func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
+	// REMOVED: Configurable GroupVersion, Codec
+	// REMOVED: Configurable versionedAPIPath
+	baseURL, err := defaultServerURLFor(config)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	transport, err := transportFor(config)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var httpClient *http.Client
+	if transport != http.DefaultTransport {
+		httpClient = &http.Client{Transport: transport}
+	}
+
+	// REMOVED: Configurable QPS, Burst, ContentConfig
+	// REMOVED: Actually returning a RESTClient object.
+	return baseURL, httpClient, nil
+}
+
+// defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL.
+// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
+// to use with a Client at a given API version following the standard conventions for a
+// Kubernetes API.
+func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
+	if host == "" {
+		return nil, errors.Errorf("host must be a URL or a host:port pair")
+	}
+	base := host
+	hostURL, err := url.Parse(base)
+	if err != nil {
+		return nil, err
+	}
+	if hostURL.Scheme == "" {
+		scheme := "http://"
+		if defaultTLS {
+			scheme = "https://"
+		}
+		hostURL, err = url.Parse(scheme + base)
+		if err != nil {
+			return nil, err
+		}
+		if hostURL.Path != "" && hostURL.Path != "/" {
+			return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
+		}
+	}
+
+	// REMOVED: versionedAPIPath computation.
+	return hostURL, nil
+}
+
+// defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor.
+// defaultServerURLFor is shared between IsConfigTransportTLS and RESTClientFor. It
+// requires Host and Version to be set prior to being called.
+func defaultServerURLFor(config *restConfig) (*url.URL, error) {
+	// TODO: move the default to secure when the apiserver supports TLS by default
+	// config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
+	hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0
+	hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0
+	defaultTLS := hasCA || hasCert || config.Insecure
+	host := config.Host
+	if host == "" {
+		host = "localhost"
+	}
+
+	// REMOVED: Configurable APIPath, GroupVersion
+	return defaultServerURL(host, defaultTLS)
+}
+
+// transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor.
+// TransportFor returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config. Will return the
+// default http.DefaultTransport if no special case behavior is needed.
+func transportFor(config *restConfig) (http.RoundTripper, error) {
+	// REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support
+	return transportNew(config)
+}
+
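The https-versus-http decision in defaultServerURLFor above reduces to a few boolean checks on the config; a standalone sketch, with a toy struct standing in for the vendored restConfig:

    package main

    import "fmt"

    // tlsHints mirrors the restConfig fields consulted by defaultServerURLFor:
    // CA material, client-certificate material, or the Insecure flag all mean
    // the server URL should default to https.
    type tlsHints struct {
        CAFile, CertFile string
        CAData, CertData []byte
        Insecure         bool
    }

    func wantsTLS(c tlsHints) bool {
        hasCA := len(c.CAFile) != 0 || len(c.CAData) != 0
        hasCert := len(c.CertFile) != 0 || len(c.CertData) != 0
        return hasCA || hasCert || c.Insecure
    }

    func main() {
        fmt.Println(wantsTLS(tlsHints{CAFile: "/etc/pki/ca.pem"})) // true, so "https://"
        fmt.Println(wantsTLS(tlsHints{}))                          // false, so "http://"
    }

+// isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS.
+// IsConfigTransportTLS returns true if and only if the provided
+// config will result in a protected connection to the server when it
+// is passed to restclient.RESTClientFor(). Use to determine when to
+// send credentials over the wire.
+//
+// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
+// still possible.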
+func isConfigTransportTLS(config restConfig) bool { + baseURL, err := defaultServerURLFor(&config) + if err != nil { + return false + } + return baseURL.Scheme == "https" +} + +// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New. +// New returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. +func transportNew(config *restConfig) (http.RoundTripper, error) { + // REMOVED: custom config.Transport support. + // Set transport level security + + var ( + rt http.RoundTripper + err error + ) + + rt, err = tlsCacheGet(config) + if err != nil { + return nil, err + } + + // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains. + if len(config.Username) != 0 && len(config.BearerToken) != 0 { + return nil, errors.Errorf("username/password or bearer token may be set, but not both") + } + + return rt, nil +} + +// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR. +// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if +// no matching CIDRs are found +func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { + // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + noProxyEnv := os.Getenv("NO_PROXY") + noProxyRules := strings.Split(noProxyEnv, ",") + + cidrs := []*net.IPNet{} + for _, noProxyRule := range noProxyRules { + _, cidr, _ := net.ParseCIDR(noProxyRule) + if cidr != nil { + cidrs = append(cidrs, cidr) + } + } + + if len(cidrs) == 0 { + return delegate + } + + return func(req *http.Request) (*url.URL, error) { + host := req.URL.Host + // for some urls, the Host is already the host, not the host:port + if net.ParseIP(host) == nil { + var err error + host, _, err = net.SplitHostPort(req.URL.Host) + if err != nil { + return delegate(req) + } + } + + ip := net.ParseIP(host) + if ip == nil { + return delegate(req) + } + + for _, cidr := range cidrs { + if cidr.Contains(ip) { + return nil, nil + } + } + + return delegate(req) + } +} + +// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get. +func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { + // REMOVED: any actual caching + + // Get the TLS options for this client config + tlsConfig, err := tlsConfigFor(config) + if err != nil { + return nil, err + } + // The options didn't require a custom TLS config + if tlsConfig == nil { + return http.DefaultTransport, nil + } + + // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here. + t := &http.Transport{ + // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings + // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY + Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment), + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + } + // Allow clients to disable http2 if needed. 
+	if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 {
+		_ = http2.ConfigureTransport(t)
+	}
+	return t, nil
+}
+
+// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor.
+// TLSConfigFor returns a tls.Config that will provide the transport level security defined
+// by the provided Config. Will return nil if no transport level security is requested.
+func tlsConfigFor(c *restConfig) (*tls.Config, error) {
+	if !(c.HasCA() || c.HasCertAuth() || c.Insecure) {
+		return nil, nil
+	}
+	if c.HasCA() && c.Insecure {
+		return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed")
+	}
+	if err := loadTLSFiles(c); err != nil {
+		return nil, err
+	}
+
+	tlsConfig := &tls.Config{
+		// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
+		MinVersion:         tls.VersionTLS10,
+		InsecureSkipVerify: c.Insecure,
+	}
+
+	if c.HasCA() {
+		tlsConfig.RootCAs = rootCertPool(c.CAData)
+	}
+
+	if c.HasCertAuth() {
+		cert, err := tls.X509KeyPair(c.CertData, c.KeyData)
+		if err != nil {
+			return nil, err
+		}
+		tlsConfig.Certificates = []tls.Certificate{cert}
+	}
+
+	return tlsConfig, nil
+}
+
+// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles.
+// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
+// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
+// either populated or were empty to start.
+func loadTLSFiles(c *restConfig) error {
+	var err error
+	c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile)
+	if err != nil {
+		return err
+	}
+
+	c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile)
+	if err != nil {
+		return err
+	}
+
+	c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile.
+// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
+// or an error if an error occurred reading the file
+func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
+	if len(data) > 0 {
+		return data, nil
+	}
+	if len(file) > 0 {
+		fileData, err := ioutil.ReadFile(file)
+		if err != nil {
+			return []byte{}, err
+		}
+		return fileData, nil
+	}
+	return nil, nil
+}
+
+// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool.
+// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs".
+// When caData is not empty, it will be the ONLY information used in the CertPool.
+func rootCertPool(caData []byte) *x509.CertPool {
+	// What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go
+	// code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values
+	// It doesn't allow trusting either/or, but hopefully that won't be an issue
+	if len(caData) == 0 {
+		return nil
+	}
+
+	// if we have caData, use it
+	certPool := x509.NewCertPool()
+	certPool.AppendCertsFromPEM(caData)
+	return certPool
+}
+
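The precedence implemented by dataFromSliceOrFile above (inline data wins over a file path, and neither being set is not an error) can be checked with a condensed standalone copy:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
    )

    // Condensed copy of dataFromSliceOrFile: inline data wins over a file path,
    // and neither being set yields (nil, nil) rather than an error.
    func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
        if len(data) > 0 {
            return data, nil
        }
        if len(file) > 0 {
            return ioutil.ReadFile(file)
        }
        return nil, nil
    }

    func main() {
        f, err := ioutil.TempFile("", "ca-*.pem")
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name())
        f.WriteString("from-file")
        f.Close()

        got, _ := dataFromSliceOrFile([]byte("inline"), f.Name())
        fmt.Println(string(got)) // inline: the slice takes precedence

        got, _ = dataFromSliceOrFile(nil, f.Name())
        fmt.Println(string(got)) // from-file
    }
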
+// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA.
+// HasCA returns whether the configuration has a certificate authority or not.
+func (c *restConfig) HasCA() bool {
+	return len(c.CAData) > 0 || len(c.CAFile) > 0
+}
+
+// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth.
+// HasCertAuth returns whether the configuration has certificate authentication or not.
+func (c *restConfig) HasCertAuth() bool {
+	return len(c.CertData) != 0 || len(c.CertFile) != 0
+}
+
+// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config.
+// Config holds the information needed to connect to remote kubernetes clusters as a given user
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+type clientcmdConfig struct {
+	// Clusters is a map of referenceable names to cluster configs
+	Clusters clustersMap `json:"clusters"`
+	// AuthInfos is a map of referenceable names to user configs
+	AuthInfos authInfosMap `json:"users"`
+	// Contexts is a map of referenceable names to context configs
+	Contexts contextsMap `json:"contexts"`
+	// CurrentContext is the name of the context that you would like to use by default
+	CurrentContext string `json:"current-context"`
+}
+
+type clustersMap map[string]*clientcmdCluster
+
+func (m *clustersMap) UnmarshalJSON(data []byte) error {
+	var a []v1NamedCluster
+	if err := json.Unmarshal(data, &a); err != nil {
+		return err
+	}
+	for _, e := range a {
+		cluster := e.Cluster // Allocates a new instance in each iteration
+		(*m)[e.Name] = &cluster
+	}
+	return nil
+}
+
+type authInfosMap map[string]*clientcmdAuthInfo
+
+func (m *authInfosMap) UnmarshalJSON(data []byte) error {
+	var a []v1NamedAuthInfo
+	if err := json.Unmarshal(data, &a); err != nil {
+		return err
+	}
+	for _, e := range a {
+		authInfo := e.AuthInfo // Allocates a new instance in each iteration
+		(*m)[e.Name] = &authInfo
+	}
+	return nil
+}
+
+type contextsMap map[string]*clientcmdContext
+
+func (m *contextsMap) UnmarshalJSON(data []byte) error {
+	var a []v1NamedContext
+	if err := json.Unmarshal(data, &a); err != nil {
+		return err
+	}
+	for _, e := range a {
+		context := e.Context // Allocates a new instance in each iteration
+		(*m)[e.Name] = &context
+	}
+	return nil
+}
+
+// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig.
+// NewConfig is a convenience function that returns a new Config object with non-nil maps
+func clientcmdNewConfig() *clientcmdConfig {
+	return &clientcmdConfig{
+		Clusters:  make(map[string]*clientcmdCluster),
+		AuthInfos: make(map[string]*clientcmdAuthInfo),
+		Contexts:  make(map[string]*clientcmdContext),
+	}
+}
+
+// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
+// Cluster contains information about how to communicate with a kubernetes cluster
+type clientcmdCluster struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CertificateAuthority is the path to a cert file for the certificate authority.
+	CertificateAuthority string `json:"certificate-authority,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+}
+
+// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
+// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
+type clientcmdAuthInfo struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// ClientCertificate is the path to a client cert file for TLS.
+	ClientCertificate string `json:"client-certificate,omitempty"`
+	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+	ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+	// ClientKey is the path to a client key file for TLS.
+	ClientKey string `json:"client-key,omitempty"`
+	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+	ClientKeyData []byte `json:"client-key-data,omitempty"`
+	// Token is the bearer token for authentication to the kubernetes cluster.
+	Token string `json:"token,omitempty"`
+	// Username is the username for basic authentication to the kubernetes cluster.
+	Username string `json:"username,omitempty"`
+	// Password is the password for basic authentication to the kubernetes cluster.
+	Password string `json:"password,omitempty"`
+}
+
+// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type clientcmdContext struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Cluster is the name of the cluster for this context
+	Cluster string `json:"cluster"`
+	// AuthInfo is the name of the authInfo for this context
+	AuthInfo string `json:"user"`
+	// Namespace is the default namespace to use on unspecified requests
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
+// NamedCluster relates nicknames to cluster information
+type v1NamedCluster struct {
+	// Name is the nickname for this Cluster
+	Name string `json:"name"`
+	// Cluster holds the cluster information
+	Cluster clientcmdCluster `json:"cluster"`
+}
+
+// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
+// NamedContext relates nicknames to context information
+type v1NamedContext struct {
+	// Name is the nickname for this Context
+	Name string `json:"name"`
+	// Context holds the context information
+	Context clientcmdContext `json:"context"`
+}
+
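The custom map unmarshalers above exist because kubeconfig files store clusters, users, and contexts as lists of named entries rather than maps; a condensed model of the list-to-map conversion (the mini* names are illustrative, and JSON stands in for the YAML that load() first converts):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // miniCluster and friends are illustrative stand-ins for the clientcmd types.
    type miniCluster struct {
        Server string `json:"server"`
    }

    type miniNamedCluster struct {
        Name    string      `json:"name"`
        Cluster miniCluster `json:"cluster"`
    }

    // miniClustersMap must be pre-allocated before unmarshaling, just as
    // clientcmdNewConfig pre-allocates the real maps.
    type miniClustersMap map[string]*miniCluster

    func (m *miniClustersMap) UnmarshalJSON(data []byte) error {
        var a []miniNamedCluster
        if err := json.Unmarshal(data, &a); err != nil {
            return err
        }
        for _, e := range a {
            cluster := e.Cluster // copy, so each map entry gets its own instance
            (*m)[e.Name] = &cluster
        }
        return nil
    }

    func main() {
        raw := []byte(`[{"name":"prod","cluster":{"server":"https://prod.example.com"}}]`)
        m := miniClustersMap{}
        if err := json.Unmarshal(raw, &m); err != nil {
            panic(err)
        }
        fmt.Println(m["prod"].Server) // https://prod.example.com
    }

+// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.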
+// NamedAuthInfo relates nicknames to auth information +type v1NamedAuthInfo struct { + // Name is the nickname for this AuthInfo + Name string `json:"name"` + // AuthInfo holds the auth information + AuthInfo clientcmdAuthInfo `json:"user"` +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go new file mode 100644 index 000000000..016de4803 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -0,0 +1,577 @@ +package openshift + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/image/v5/version" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// openshiftClient is configuration for dealing with a single image stream, for reading or writing. +type openshiftClient struct { + ref openshiftReference + baseURL *url.URL + // Values from Kubernetes configuration + httpClient *http.Client + bearerToken string // "" if not used + username string // "" if not used + password string // if username != "" +} + +// newOpenshiftClient creates a new openshiftClient for the specified reference. +func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { + // We have already done this parsing in ParseReference, but thrown away + // httpClient. So, parse again. + // (We could also rework/split restClientFor to "get base URL" to be done + // in ParseReference, and "get httpClient" to be done here. But until/unless + // we support non-default clusters, this is good enough.) + + // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client. + cmdConfig := defaultClientConfig() + logrus.Debugf("cmdConfig: %#v", cmdConfig) + restConfig, err := cmdConfig.ClientConfig() + if err != nil { + return nil, err + } + // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.) + logrus.Debugf("restConfig: %#v", restConfig) + baseURL, httpClient, err := restClientFor(restConfig) + if err != nil { + return nil, err + } + logrus.Debugf("URL: %#v", *baseURL) + + if httpClient == nil { + httpClient = http.DefaultClient + } + + return &openshiftClient{ + ref: ref, + baseURL: baseURL, + httpClient: httpClient, + bearerToken: restConfig.BearerToken, + username: restConfig.Username, + password: restConfig.Password, + }, nil +} + +// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. 
+func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { + url := *c.baseURL + url.Path = path + var requestBodyReader io.Reader + if requestBody != nil { + logrus.Debugf("Will send body: %s", requestBody) + requestBodyReader = bytes.NewReader(requestBody) + } + req, err := http.NewRequest(method, url.String(), requestBodyReader) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + if len(c.bearerToken) != 0 { + req.Header.Set("Authorization", "Bearer "+c.bearerToken) + } else if len(c.username) != 0 { + req.SetBasicAuth(c.username, c.password) + } + req.Header.Set("Accept", "application/json, */*") + req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version)) + if requestBody != nil { + req.Header.Set("Content-Type", "application/json") + } + + logrus.Debugf("%s %s", method, url.String()) + res, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + logrus.Debugf("Got body: %s", body) + // FIXME: Just throwing this useful information away only to try to guess later... + logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type")) + + var status status + statusValid := false + if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 { + statusValid = true + } + + switch { + case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient. + if statusValid && status.Status != "Success" { + return nil, errors.New(status.Message) + } + case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent: + // OK. + default: + if statusValid { + return nil, errors.New(status.Message) + } + return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body)) + } + + return body, nil +} + +// getImage loads the specified image object. +func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) { + // FIXME: validate components per validation.IsValidPathSegmentName? + path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName) + body, err := c.doRequest(ctx, "GET", path, nil) + if err != nil { + return nil, err + } + // Note: This does absolutely no kind/version checking or conversions. + var isi imageStreamImage + if err := json.Unmarshal(body, &isi); err != nil { + return nil, err + } + return &isi.Image, nil +} + +// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; +// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. 
+func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { + parts := strings.SplitN(ref, "/", 2) + if len(parts) != 2 { + return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref) + } + return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil +} + +type openshiftImageSource struct { + client *openshiftClient + // Values specific to this image + sys *types.SystemContext + // State + docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet + imageStreamImageName string // Resolved image identifier, or "" if not known yet +} + +// newImageSource creates a new ImageSource for the specified reference. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(sys *types.SystemContext, ref openshiftReference) (types.ImageSource, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + return &openshiftImageSource{ + client: client, + sys: sys, + }, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *openshiftImageSource) Reference() types.ImageReference { + return s.client.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *openshiftImageSource) Close() error { + if s.docker != nil { + err := s.docker.Close() + s.docker = nil + + return err + } + + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, "", err + } + return s.docker.GetManifest(ctx, instanceDigest) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, 0, err + } + return s.docker.GetBlob(ctx, info, cache) +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). 
+func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	var imageStreamImageName string
+	if instanceDigest == nil {
+		if err := s.ensureImageIsResolved(ctx); err != nil {
+			return nil, err
+		}
+		imageStreamImageName = s.imageStreamImageName
+	} else {
+		imageStreamImageName = instanceDigest.String()
+	}
+	image, err := s.client.getImage(ctx, imageStreamImageName)
+	if err != nil {
+		return nil, err
+	}
+	var sigs [][]byte
+	for _, sig := range image.Signatures {
+		if sig.Type == imageSignatureTypeAtomic {
+			sigs = append(sigs, sig.Content)
+		}
+	}
+	return sigs, nil
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+	return nil, nil
+}
+
+// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
+func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
+	if s.docker != nil {
+		return nil
+	}
+
+	// FIXME: validate components per validation.IsValidPathSegmentName?
+	path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
+	body, err := s.client.doRequest(ctx, "GET", path, nil)
+	if err != nil {
+		return err
+	}
+	// Note: This does absolutely no kind/version checking or conversions.
+	var is imageStream
+	if err := json.Unmarshal(body, &is); err != nil {
+		return err
+	}
+	var te *tagEvent
+	for _, tag := range is.Status.Tags {
+		if tag.Tag != s.client.ref.dockerReference.Tag() {
+			continue
+		}
+		if len(tag.Items) > 0 {
+			te = &tag.Items[0]
+			break
+		}
+	}
+	if te == nil {
+		return errors.Errorf("No matching tag found")
+	}
+	logrus.Debugf("tag event %#v", te)
+	dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Resolved reference %#v", dockerRefString)
+	dockerRef, err := docker.ParseReference("//" + dockerRefString)
+	if err != nil {
+		return err
+	}
+	d, err := dockerRef.NewImageSource(ctx, s.sys)
+	if err != nil {
+		return err
+	}
+	s.docker = d
+	s.imageStreamImageName = te.Image
+	return nil
+}
+
+type openshiftImageDestination struct {
+	client *openshiftClient
+	docker types.ImageDestination // The Docker Registry endpoint
+	// State
+	imageStreamImageName string // "" if not yet known
+}
+
+// newImageDestination creates a new ImageDestination for the specified reference.
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) {
+	client, err := newOpenshiftClient(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	// FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
+	// i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
+	// the manifest digest at this point.
+	dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
+	dockerRef, err := docker.ParseReference(dockerRefString)
+	if err != nil {
+		return nil, err
+	}
+	docker, err := dockerRef.NewImageDestination(ctx, sys)
+	if err != nil {
+		return nil, err
+	}
+
+	return &openshiftImageDestination{
+		client: client,
+		docker: docker,
+	}, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *openshiftImageDestination) Reference() types.ImageReference {
+	return d.client.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *openshiftImageDestination) Close() error {
+	return d.docker.Close()
+}
+
+func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
+	return d.docker.SupportedManifestMIMETypes()
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *openshiftImageDestination) SupportsSignatures(ctx context.Context) error {
+	return nil
+}
+
+func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
+	return types.Compress
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
+	return true
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
+	return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return d.docker.IgnoresEmbeddedDockerReference()
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+	if instanceDigest == nil {
+		manifestDigest, err := manifest.Digest(m)
+		if err != nil {
+			return err
+		}
+		d.imageStreamImageName = manifestDigest.String()
+	}
+	return d.docker.PutManifest(ctx, m, instanceDigest)
+}
+
+func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+	var imageStreamName string
+	if instanceDigest == nil {
+		if d.imageStreamImageName == "" {
+			return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
+		}
+		imageStreamName = d.imageStreamImageName
+	} else {
+		imageStreamName = instanceDigest.String()
+	}
+
+	// Because image signatures are a shared resource in Atomic Registry, the default upload
+	// always adds signatures. Eventually we should also allow removing signatures.
+
+	if len(signatures) == 0 {
+		return nil // No need to even read the old state.
+	}
+
+	image, err := d.client.getImage(ctx, imageStreamName)
+	if err != nil {
+		return err
+	}
+	existingSigNames := map[string]struct{}{}
+	for _, sig := range image.Signatures {
+		existingSigNames[sig.objectMeta.Name] = struct{}{}
+	}
+
+sigExists:
+	for _, newSig := range signatures {
+		for _, existingSig := range image.Signatures {
+			if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+				continue sigExists
+			}
+		}
+
+		// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+ var signatureName string + for { + randBytes := make([]byte, 16) + n, err := rand.Read(randBytes) + if err != nil || n != 16 { + return errors.Wrapf(err, "Error generating random signature len %d", n) + } + signatureName = fmt.Sprintf("%s@%032x", imageStreamName, randBytes) + if _, ok := existingSigNames[signatureName]; !ok { + break + } + } + // Note: This does absolutely no kind/version checking or conversions. + sig := imageSignature{ + typeMeta: typeMeta{ + Kind: "ImageSignature", + APIVersion: "v1", + }, + objectMeta: objectMeta{Name: signatureName}, + Type: imageSignatureTypeAtomic, + Content: newSig, + } + body, err := json.Marshal(sig) + _, err = d.client.doRequest(ctx, "POST", "/oapi/v1/imagesignatures", body) + if err != nil { + return err + } + } + + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return d.docker.Commit(ctx, unparsedToplevel) +} + +// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies. +type imageStream struct { + Status imageStreamStatus `json:"status,omitempty"` +} +type imageStreamStatus struct { + DockerImageRepository string `json:"dockerImageRepository"` + Tags []namedTagEventList `json:"tags,omitempty"` +} +type namedTagEventList struct { + Tag string `json:"tag"` + Items []tagEvent `json:"items"` +} +type tagEvent struct { + DockerImageReference string `json:"dockerImageReference"` + Image string `json:"image"` +} +type imageStreamImage struct { + Image image `json:"image"` +} +type image struct { + objectMeta `json:"metadata,omitempty"` + DockerImageReference string `json:"dockerImageReference,omitempty"` + // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"` + DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"` + DockerImageManifest string `json:"dockerImageManifest,omitempty"` + // DockerImageLayers []ImageLayer `json:"dockerImageLayers"` + Signatures []imageSignature `json:"signatures,omitempty"` +} + +const imageSignatureTypeAtomic string = "atomic" + +type imageSignature struct { + typeMeta `json:",inline"` + objectMeta `json:"metadata,omitempty"` + Type string `json:"type"` + Content []byte `json:"content"` + // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // ImageIdentity string `json:"imageIdentity,omitempty"` + // SignedClaims map[string]string `json:"signedClaims,omitempty"` + // Created *unversioned.Time `json:"created,omitempty"` + // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"` + // IssuedTo SignatureSubject `json:"issuedTo,omitempty"` +} +type typeMeta struct { + Kind string `json:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` +} +type objectMeta struct { + Name string `json:"name,omitempty"` + GenerateName string `json:"generateName,omitempty"` + Namespace string `json:"namespace,omitempty"` + SelfLink string `json:"selfLink,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` + Generation int64 `json:"generation,omitempty"` + DeletionGracePeriodSeconds *int64 
`json:"deletionGracePeriodSeconds,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status +type status struct { + Status string `json:"status,omitempty"` + Message string `json:"message,omitempty"` + // Reason StatusReason `json:"reason,omitempty"` + // Details *StatusDetails `json:"details,omitempty"` + Code int32 `json:"code,omitempty"` +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go new file mode 100644 index 000000000..6bbb43be2 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go @@ -0,0 +1,157 @@ +package openshift + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/containers/image/v5/docker/policyconfiguration" + "github.com/containers/image/v5/docker/reference" + genericImage "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for OpenShift registry-hosted images. +var Transport = openshiftTransport{} + +type openshiftTransport struct{} + +func (t openshiftTransport) Name() string { + return "atomic" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// Note that imageNameRegexp is namespace/stream:tag, this +// is HOSTNAME/namespace/stream:tag or parent prefixes. +// Keep this in sync with imageNameRegexp! +var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$") + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error { + if scopeRegexp.FindStringIndex(scope) == nil { + return errors.Errorf("Invalid scope name %s", scope) + } + return nil +} + +// openshiftReference is an ImageReference for OpenShift images. +type openshiftReference struct { + dockerReference reference.NamedTagged + namespace string // Computed from dockerReference in advance. + stream string // Computed from dockerReference in advance. +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference. 
+func ParseReference(ref string) (types.ImageReference, error) { + r, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse image reference %q", ref) + } + tagged, ok := r.(reference.NamedTagged) + if !ok { + return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref) + } + return NewReference(tagged) +} + +// NewReference returns an OpenShift reference for a reference.NamedTagged +func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) { + r := strings.SplitN(reference.Path(dockerRef), "/", 3) + if len(r) != 2 { + return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'", + reference.FamiliarString(dockerRef)) + } + return openshiftReference{ + namespace: r[0], + stream: r[1], + dockerReference: dockerRef, + }, nil +} + +func (ref openshiftReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref openshiftReference) StringWithinTransport() string { + return reference.FamiliarString(ref.dockerReference) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref openshiftReference) DockerReference() reference.Named { + return ref.dockerReference +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref openshiftReference) PolicyConfigurationIdentity() string { + res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference) + if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. 
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
+	return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference)
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	src, err := newImageSource(sys, ref)
+	if err != nil {
+		return nil, err
+	}
+	return genericImage.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref openshiftReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(ctx, sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for atomic: images")
+}
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
new file mode 100644
index 000000000..c442b4d2e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -0,0 +1,517 @@
+// +build containers_image_ostree
+
+package ostree
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/klauspost/pgzip"
+	"github.com/opencontainers/go-digest"
+	selinux "github.com/opencontainers/selinux/go-selinux"
+	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+// #include <selinux/selinux.h>
+// #include <selinux/label.h>
+import "C"
+
+type blobToImport struct {
+	Size     int64
+	Digest   digest.Digest
+	BlobPath string
+}
+
+type descriptor struct {
+	Size   int64         `json:"size"`
+	Digest digest.Digest `json:"digest"`
+}
+
+type fsLayersSchema1 struct {
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+type manifestSchema struct {
+	LayersDescriptors []descriptor      `json:"layers"`
+	FSLayers          []fsLayersSchema1 `json:"fsLayers"`
+}
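An aside on manifestSchema above: it deliberately decodes only the layer lists, accepting both manifest flavors this destination may be fed: schema2 manifests carry `layers`, while legacy schema1 manifests carry `fsLayers`. A minimal sketch of that dual decoding (digest values are hypothetical):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Mirrors the shape of manifestSchema above, with plain strings for brevity.
    type manifestSchema struct {
    	Layers []struct {
    		Digest string `json:"digest"`
    	} `json:"layers"`
    	FSLayers []struct {
    		BlobSum string `json:"blobSum"`
    	} `json:"fsLayers"`
    }

    func main() {
    	var m1, m2 manifestSchema
    	// schema1-style manifest: only fsLayers is populated.
    	_ = json.Unmarshal([]byte(`{"fsLayers":[{"blobSum":"sha256:aa"}]}`), &m1)
    	// schema2-style manifest: only layers is populated.
    	_ = json.Unmarshal([]byte(`{"layers":[{"digest":"sha256:bb"}]}`), &m2)
    	fmt.Println(len(m1.FSLayers), len(m2.Layers)) // 1 1
    }

Commit() later in this file walks both lists, which is why the type keeps them side by side.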
+
+type ostreeImageDestination struct {
+	ref           ostreeReference
+	manifest      string
+	schema        manifestSchema
+	tmpDirPath    string
+	blobs         map[string]*blobToImport
+	digest        digest.Digest
+	signaturesLen int
+	repo          *C.struct_OstreeRepo
+}
+
+// newImageDestination returns an ImageDestination for writing to an existing ostree repository.
+func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) {
+	tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
+	if err := ensureDirectoryExists(tmpDirPath); err != nil {
+		return nil, err
+	}
+	return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *ostreeImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *ostreeImageDestination) Close() error {
+	if d.repo != nil {
+		C.g_object_unref(C.gpointer(d.repo))
+	}
+	return os.RemoveAll(d.tmpDirPath)
+}
+
+func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string {
+	return []string{
+		manifest.DockerV2Schema2MediaType,
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *ostreeImageDestination) SupportsSignatures(ctx context.Context) error {
+	return nil
+}
+
+// DesiredLayerCompression indicates the kind of compression to apply to layer blobs written to this destination.
+func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression {
+	return types.PreserveOriginal
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool {
+	return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *ostreeImageDestination) MustMatchRuntimeOS() bool {
+	return true
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return false // N/A, DockerReference() returns nil.
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") + if err != nil { + return types.BlobInfo{}, err + } + + blobPath := filepath.Join(tmpDir, "content") + blobFile, err := os.Create(blobPath) + if err != nil { + return types.BlobInfo{}, err + } + defer blobFile.Close() + + digester := digest.Canonical.Digester() + tee := io.TeeReader(stream, digester.Hash()) + + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(blobFile, tee) + if err != nil { + return types.BlobInfo{}, err + } + computedDigest := digester.Digest() + if inputInfo.Size != -1 && size != inputInfo.Size { + return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) + } + if err := blobFile.Sync(); err != nil { + return types.BlobInfo{}, err + } + + hash := computedDigest.Hex() + d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath} + return types.BlobInfo{Digest: computedDigest, Size: size}, nil +} + +func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { + entries, err := ioutil.ReadDir(dir) + if err != nil { + return err + } + + for _, info := range entries { + fullpath := filepath.Join(dir, info.Name()) + if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + if err := os.Remove(fullpath); err != nil { + return err + } + continue + } + + if selinuxHnd != nil { + relPath, err := filepath.Rel(root, fullpath) + if err != nil { + return err + } + // Handle /exports/hostfs as a special case. Files under this directory are copied to the host, + // thus we benefit from maintaining the same SELinux label they would have on the host as we could + // use hard links instead of copying the files. 
+ relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/")) + + relPathC := C.CString(relPath) + defer C.free(unsafe.Pointer(relPathC)) + var context *C.char + + res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm)) + if int(res) < 0 && err != syscall.ENOENT { + return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath) + } + if int(res) == 0 { + defer C.freecon(context) + fullpathC := C.CString(fullpath) + defer C.free(unsafe.Pointer(fullpathC)) + res, err = C.lsetfilecon_raw(fullpathC, context) + if int(res) < 0 { + return errors.Wrapf(err, "cannot setfilecon_raw %s to %s", fullpath, C.GoString(context)) + } + } + } + + if info.IsDir() { + if usermode { + if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { + return err + } + } + err = fixFiles(selinuxHnd, root, fullpath, usermode) + if err != nil { + return err + } + } else if usermode && (info.Mode().IsRegular()) { + if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { + return err + } + } + } + + return nil +} + +func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error { + opts := otbuiltin.NewCommitOptions() + opts.AddMetadataString = metadata + opts.Timestamp = time.Now() + // OCI layers have no parent OSTree commit + opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" + _, err := repo.Commit(root, branch, opts) + return err +} + +func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) { + mfz := pgzip.NewWriter(output) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + stream, err := os.OpenFile(file, os.O_RDONLY, 0) + if err != nil { + return "", -1, err + } + defer stream.Close() + + gzReader, err := archive.DecompressStream(stream) + if err != nil { + return "", -1, err + } + defer gzReader.Close() + + its, err := asm.NewInputTarStream(gzReader, metaPacker, nil) + if err != nil { + return "", -1, err + } + + digester := digest.Canonical.Digester() + + written, err := io.Copy(digester.Hash(), its) + if err != nil { + return "", -1, err + } + + return digester.Digest(), written, nil +} + +func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error { + // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. 
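+	// importBlob extracts the layer into a scratch directory, records tar-split
+	// metadata so the original tarball can be reconstructed bit-for-bit on read,
+	// normalizes permissions and SELinux labels, and commits the resulting tree
+	// to the per-layer ociimage/<digest> branch.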
+
+	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+	destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
+	if err := ensureDirectoryExists(destinationPath); err != nil {
+		return err
+	}
+	defer func() {
+		os.Remove(blob.BlobPath)
+		os.RemoveAll(destinationPath)
+	}()
+
+	var tarSplitOutput bytes.Buffer
+	uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath)
+	if err != nil {
+		return err
+	}
+
+	if os.Getuid() == 0 {
+		if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
+			return err
+		}
+		if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil {
+			return err
+		}
+	} else {
+		os.MkdirAll(destinationPath, 0755)
+		if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil {
+			return err
+		}
+
+		if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil {
+			return err
+		}
+	}
+	return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size),
+		fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize),
+		fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()),
+		fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))})
+
+}
+
+func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
+	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+	destinationPath := filepath.Dir(blob.BlobPath)
+
+	return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
+}
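The metadata strings recorded by importBlob and importConfig above are the same keys that TryReusingBlob below (and, further on, LayerInfosForCopy and GetBlob in ostree_src.go) probe. A summary as an illustrative Go snippet; the constant names are invented for this sketch, only the string values appear in the code:

    package ostreemeta // hypothetical package, for illustration only

    // Per-branch OSTree commit metadata keys used by this transport.
    const (
    	metaSize               = "docker.size"                // compressed blob size
    	metaUncompressedSize   = "docker.uncompressed_size"   // uncompressed layer size
    	metaUncompressedDigest = "docker.uncompressed_digest" // uncompressed layer digest
    	metaTarSplit           = "tarsplit.output"            // base64-encoded gzipped tar-split metadata
    	metaManifest           = "docker.manifest"            // manifest body (on the image branch)
    	metaDigest             = "docker.digest"              // manifest digest (on the image branch)
    	metaSignatures         = "signatures"                 // signature count (on the image branch)
    )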
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if d.repo == nil {
+		repo, err := openRepo(d.ref.repo)
+		if err != nil {
+			return false, types.BlobInfo{}, err
+		}
+		d.repo = repo
+	}
+	branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
+
+	found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
+	if err != nil || !found {
+		return found, types.BlobInfo{}, err
+	}
+
+	found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
+	if err != nil || !found {
+		return found, types.BlobInfo{}, err
+	}
+
+	found, data, err = readMetadata(d.repo, branch, "docker.size")
+	if err != nil || !found {
+		return found, types.BlobInfo{}, err
+	}
+
+	size, err := strconv.ParseInt(data, 10, 64)
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+
+	return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
+	if instanceDigest != nil {
+		return errors.New(`Manifest lists are not supported by "ostree:"`)
+	}
+
+	d.manifest = string(manifestBlob)
+
+	if err := json.Unmarshal(manifestBlob, &d.schema); err != nil {
+		return err
+	}
+
+	manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath())
+	if err := ensureParentDirectoryExists(manifestPath); err != nil {
+		return err
+	}
+
+	digest, err := manifest.Digest(manifestBlob)
+	if err != nil {
+		return err
+	}
+	d.digest = digest
+
+	return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
+}
+
+// PutSignatures writes signatures to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
+func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + if instanceDigest != nil { + return errors.New(`Manifest lists are not supported by "ostree:"`) + } + + path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0)) + if err := ensureParentDirectoryExists(path); err != nil { + return err + } + + for i, sig := range signatures { + signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i)) + if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil { + return err + } + } + d.signaturesLen = len(signatures) + return nil +} + +func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + repo, err := otbuiltin.OpenRepo(d.ref.repo) + if err != nil { + return err + } + + _, err = repo.PrepareTransaction() + if err != nil { + return err + } + + var selinuxHnd *C.struct_selabel_handle + + if os.Getuid() == 0 && selinux.GetEnabled() { + selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0) + if selinuxHnd == nil { + return errors.Wrapf(err, "cannot open the SELinux DB") + } + + defer C.selabel_close(selinuxHnd) + } + + checkLayer := func(hash string) error { + blob := d.blobs[hash] + // if the blob is not present in d.blobs then it is already stored in OSTree, + // and we don't need to import it. + if blob == nil { + return nil + } + err := d.importBlob(selinuxHnd, repo, blob) + if err != nil { + return err + } + + delete(d.blobs, hash) + return nil + } + for _, layer := range d.schema.LayersDescriptors { + hash := layer.Digest.Hex() + if err = checkLayer(hash); err != nil { + return err + } + } + for _, layer := range d.schema.FSLayers { + hash := layer.BlobSum.Hex() + if err = checkLayer(hash); err != nil { + return err + } + } + + // Import the other blobs that are not layers + for _, blob := range d.blobs { + err := d.importConfig(repo, blob) + if err != nil { + return err + } + } + + manifestPath := filepath.Join(d.tmpDirPath, "manifest") + + metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)), + fmt.Sprintf("signatures=%d", d.signaturesLen), + fmt.Sprintf("docker.digest=%s", string(d.digest))} + if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil { + return err + } + + _, err = repo.CommitTransaction() + return err +} + +func ensureDirectoryExists(path string) error { + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } + return nil +} + +func ensureParentDirectoryExists(path string) error { + return ensureDirectoryExists(filepath.Dir(path)) +} diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go new file mode 100644 index 000000000..4948ec664 --- /dev/null +++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go @@ -0,0 +1,430 @@ +// +build containers_image_ostree + +package ostree + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "strconv" + "strings" + "unsafe" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/ioutils" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + glib "github.com/ostreedev/ostree-go/pkg/glibobject" + "github.com/pkg/errors" + "github.com/vbatts/tar-split/tar/asm" + 
"github.com/vbatts/tar-split/tar/storage" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 +// #include +// #include +// #include +// #include +// #include +// #include +import "C" + +type ostreeImageSource struct { + ref ostreeReference + tmpDir string + repo *C.struct_OstreeRepo + // get the compressed layer by its uncompressed checksum + compressed map[digest.Digest]digest.Digest +} + +// newImageSource returns an ImageSource for reading from an existing directory. +func newImageSource(tmpDir string, ref ostreeReference) (types.ImageSource, error) { + return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil +} + +// Reference returns the reference used to set up this source. +func (s *ostreeImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *ostreeImageSource) Close() error { + if s.repo != nil { + C.g_object_unref(C.gpointer(s.repo)) + } + return nil +} + +func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) { + var metadataKey string + if isCompressed { + metadataKey = "docker.uncompressed_size" + } else { + metadataKey = "docker.size" + } + b := fmt.Sprintf("ociimage/%s", blob) + found, data, err := readMetadata(s.repo, b, metadataKey) + if err != nil || !found { + return 0, err + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getLenSignatures() (int64, error) { + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, data, err := readMetadata(s.repo, b, "signatures") + if err != nil { + return -1, err + } + if !found { + // if 'signatures' is not present, just return 0 signatures. + return 0, nil + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) { + b := fmt.Sprintf("ociimage/%s", blob) + found, out, err := readMetadata(s.repo, b, "tarsplit.output") + if err != nil || !found { + return nil, err + } + return base64.StdEncoding.DecodeString(out) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be non-default instances. 
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no non-default instances.
+func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+	if instanceDigest != nil {
+		return nil, "", errors.New(`Manifest lists are not supported by "ostree:"`)
+	}
+	if s.repo == nil {
+		repo, err := openRepo(s.ref.repo)
+		if err != nil {
+			return nil, "", err
+		}
+		s.repo = repo
+	}
+
+	b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+	found, out, err := readMetadata(s.repo, b, "docker.manifest")
+	if err != nil {
+		return nil, "", err
+	}
+	if !found {
+		return nil, "", errors.New("manifest not found")
+	}
+	m := []byte(out)
+	return m, manifest.GuessMIMEType(m), nil
+}
+
+func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+	return nil, "", errors.New("manifest lists are not supported by this transport")
+}
+
+func openRepo(path string) (*C.struct_OstreeRepo, error) {
+	var cerr *C.GError
+	cpath := C.CString(path)
+	defer C.free(unsafe.Pointer(cpath))
+	pathc := C.g_file_new_for_path(cpath)
+	defer C.g_object_unref(C.gpointer(pathc))
+	repo := C.ostree_repo_new(pathc)
+	r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
+	if !r {
+		C.g_object_unref(C.gpointer(repo))
+		return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+	return repo, nil
+}
+
+type ostreePathFileGetter struct {
+	repo       *C.struct_OstreeRepo
+	parentRoot *C.GFile
+}
+
+type ostreeReader struct {
+	stream *C.GFileInputStream
+}
+
+func (o ostreeReader) Close() error {
+	C.g_object_unref(C.gpointer(o.stream))
+	return nil
+}
+func (o ostreeReader) Read(p []byte) (int, error) {
+	var cerr *C.GError
+	instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type())
+	stream := (*C.GInputStream)(unsafe.Pointer(instanceCast))
+
+	b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr)
+	if b == nil {
+		return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+	defer C.g_bytes_unref(b)
+
+	count := int(C.g_bytes_get_size(b))
+	if count == 0 {
+		return 0, io.EOF
+	}
+	data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count]
+	copy(p, data)
+	return count, nil
+}
+
+func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) {
+	var cerr *C.GError
+	var ref *C.char
+	// Free ref in a deferred closure: a plain `defer C.free(unsafe.Pointer(ref))`
+	// would evaluate its argument now and free the current (nil) value.
+	defer func() { C.free(unsafe.Pointer(ref)) }()
+
+	cCommit := C.CString(commit)
+	defer C.free(unsafe.Pointer(cCommit))
+
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) {
+		return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+
+	if ref == nil {
+		return false, "", nil
+	}
+
+	var variant *C.GVariant
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) {
+		return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+	defer C.g_variant_unref(variant)
+	if variant != nil {
+		cKey := C.CString(key)
+		defer C.free(unsafe.Pointer(cKey))
+
+		metadata := C.g_variant_get_child_value(variant, 0)
+		defer C.g_variant_unref(metadata)
+
+		data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil)
+		if data != nil {
+			defer C.g_variant_unref(data)
+			ptr := (*C.char)(C.g_variant_get_string(data, nil))
+			val := C.GoString(ptr)
+			return true, val, nil
+		}
+	}
+	return false, "", nil
+}
+
+func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) {
+	var cerr *C.GError
+	var parentRoot *C.GFile
+	cCommit := C.CString(commit)
+	defer C.free(unsafe.Pointer(cCommit))
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) {
+		return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+
+	C.g_object_ref(C.gpointer(repo))
+
+	return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil
+}
+
+func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
+	var file *C.GFile
+	if strings.HasPrefix(filename, "./") {
+		filename = filename[2:]
+	}
+	cfilename := C.CString(filename)
+	defer C.free(unsafe.Pointer(cfilename))
+
+	file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename))
+
+	var cerr *C.GError
+	stream := C.g_file_read(file, nil, &cerr)
+	if stream == nil {
+		return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+
+	return &ostreeReader{stream: stream}, nil
+}
+
+func (o ostreePathFileGetter) Close() {
+	C.g_object_unref(C.gpointer(o.repo))
+	C.g_object_unref(C.gpointer(o.parentRoot))
+}
+
+func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) {
+	getter, err := newOSTreePathFileGetter(s.repo, commit)
+	if err != nil {
+		return nil, err
+	}
+	defer getter.Close()
+
+	return getter.Get(path)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
+	return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+
+	blob := info.Digest.Hex()
+
+	// Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
+	if s.compressed == nil {
+		_, err := s.LayerInfosForCopy(ctx, nil)
+		if err != nil {
+			return nil, -1, err
+		}
+
+	}
+	compressedBlob, isCompressed := s.compressed[info.Digest]
+	if isCompressed {
+		blob = compressedBlob.Hex()
+	}
+	branch := fmt.Sprintf("ociimage/%s", blob)
+
+	if s.repo == nil {
+		repo, err := openRepo(s.ref.repo)
+		if err != nil {
+			return nil, 0, err
+		}
+		s.repo = repo
+	}
+
+	layerSize, err := s.getBlobUncompressedSize(blob, isCompressed)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	tarsplit, err := s.getTarSplitData(blob)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// if tarsplit is nil we are looking at the manifest. Return the file in /content directly.
+	if tarsplit == nil {
+		file, err := s.readSingleFile(branch, "/content")
+		if err != nil {
+			return nil, 0, err
+		}
+		return file, layerSize, nil
+	}
+
+	mf := bytes.NewReader(tarsplit)
+	mfz, err := pgzip.NewReader(mf)
+	if err != nil {
+		return nil, 0, err
+	}
+	metaUnpacker := storage.NewJSONUnpacker(mfz)
+
+	getter, err := newOSTreePathFileGetter(s.repo, branch)
+	if err != nil {
+		mfz.Close()
+		return nil, 0, err
+	}
+
+	ots := asm.NewOutputTarStream(getter, metaUnpacker)
+
+	rc := ioutils.NewReadCloserWrapper(ots, func() error {
+		getter.Close()
+		mfz.Close()
+		return ots.Close()
+	})
+	return rc, layerSize, nil
+}
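GetBlob above is the read half of the tar-split round trip whose write half is generateTarSplitMetadata in ostree_dest.go. A self-contained sketch of the same disassemble/reassemble cycle with the tar-split APIs, keeping payloads in memory instead of an OSTree checkout (the input file name is hypothetical):

    package main

    import (
    	"bytes"
    	"io"
    	"io/ioutil"
    	"os"

    	"github.com/vbatts/tar-split/tar/asm"
    	"github.com/vbatts/tar-split/tar/storage"
    )

    func main() {
    	orig, err := os.Open("layer.tar") // hypothetical uncompressed tar
    	if err != nil {
    		panic(err)
    	}
    	defer orig.Close()

    	// Disassemble: record structure in `meta`, file payloads in `files`.
    	var meta bytes.Buffer
    	files := storage.NewBufferFileGetPutter()
    	its, err := asm.NewInputTarStream(orig, storage.NewJSONPacker(&meta), files)
    	if err != nil {
    		panic(err)
    	}
    	if _, err := io.Copy(ioutil.Discard, its); err != nil {
    		panic(err)
    	}

    	// Reassemble: the output is byte-for-byte identical to the original stream.
    	rebuilt := asm.NewOutputTarStream(files, storage.NewJSONUnpacker(&meta))
    	defer rebuilt.Close()
    	if _, err := io.Copy(ioutil.Discard, rebuilt); err != nil {
    		panic(err)
    	}
    }

In this patch the FileGetter role is played by ostreePathFileGetter, which serves file payloads out of the committed OSTree tree instead of a memory buffer.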
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as there can be no secondary manifests.
+func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	if instanceDigest != nil {
+		return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
+	}
+	lenSignatures, err := s.getLenSignatures()
+	if err != nil {
+		return nil, err
+	}
+	branch := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+
+	if s.repo == nil {
+		repo, err := openRepo(s.ref.repo)
+		if err != nil {
+			return nil, err
+		}
+		s.repo = repo
+	}
+
+	signatures := [][]byte{}
+	for i := int64(1); i <= lenSignatures; i++ {
+		sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i))
+		if err != nil {
+			return nil, err
+		}
+		defer sigReader.Close()
+
+		sig, err := ioutil.ReadAll(sigReader)
+		if err != nil {
+			return nil, err
+		}
+		signatures = append(signatures, sig)
+	}
+	return signatures, nil
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary manifests.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+	if instanceDigest != nil {
+		return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
+	}
+
+	updatedBlobInfos := []types.BlobInfo{}
+	manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	man, err := manifest.FromBlob(manifestBlob, manifestType)
+	if err != nil {
+		return nil, err
+	}
+
+	s.compressed = make(map[digest.Digest]digest.Digest)
+
+	layerBlobs := man.LayerInfos()
+
+	for _, layerBlob := range layerBlobs {
+		branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
+		found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
+		if err != nil || !found {
+			return nil, err
+		}
+
+		found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size")
+		if err != nil || !found {
+			return nil, err
+		}
+
+		uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		uncompressedDigest := digest.Digest(uncompressedDigestStr)
+		blobInfo := types.BlobInfo{
+			Digest:    uncompressedDigest,
+			Size:      uncompressedSize,
+			MediaType: layerBlob.MediaType,
+		}
+		s.compressed[uncompressedDigest] = layerBlob.Digest
+		updatedBlobInfos = append(updatedBlobInfos, blobInfo)
+	}
+	return updatedBlobInfos, nil
+}
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
new file mode 100644
index 000000000..a55147b85
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
@@ -0,0 +1,252 @@
+// +build containers_image_ostree
+
+package ostree
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +const defaultOSTreeRepo = "/ostree/repo" + +// Transport is an ImageTransport for ostree paths. +var Transport = ostreeTransport{} + +type ostreeTransport struct{} + +func (t ostreeTransport) Name() string { + return "ostree" +} + +func init() { + transports.Register(Transport) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error { + sep := strings.Index(scope, ":") + if sep < 0 { + return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope) + } + repo := scope[:sep] + + if !strings.HasPrefix(repo, "/") { + return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope) + } + cleaned := filepath.Clean(repo) + if cleaned != repo { + return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } + + // FIXME? In the namespaces within a repo, + // we could be verifying the various character set and length restrictions + // from docker/distribution/reference.regexp.go, but other than that there + // are few semantically invalid strings. + return nil +} + +// ostreeReference is an ImageReference for ostree paths. +type ostreeReference struct { + image string + branchName string + repo string +} + +type ostreeImageCloser struct { + types.ImageCloser + size int64 +} + +func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) { + var repo = "" + var image = "" + s := strings.SplitN(ref, "@/", 2) + if len(s) == 1 { + image, repo = s[0], defaultOSTreeRepo + } else { + image, repo = s[0], "/"+s[1] + } + + return NewReference(image, repo) +} + +// NewReference returns an OSTree reference for a specified repo and image. +func NewReference(image string, repo string) (types.ImageReference, error) { + // image is not _really_ in a containers/image/docker/reference format; + // as far as the libOSTree ociimage/* namespace is concerned, it is more or + // less an arbitrary string with an implied tag. + // Parse the image using reference.ParseNormalizedNamed so that we can + // check whether the images has a tag specified and we can add ":latest" if needed + ostreeImage, err := reference.ParseNormalizedNamed(image) + if err != nil { + return nil, err + } + + if reference.IsNameOnly(ostreeImage) { + image = image + ":latest" + } + + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo) + if err != nil { + // With os.IsNotExist(err), the parent directory of repo is also not existent; + // that should ordinarily not happen, but it would be a bit weird to reject + // references which do not specify a repo just because the implicit defaultOSTreeRepo + // does not exist. 
+		if os.IsNotExist(err) && repo == defaultOSTreeRepo {
+			resolved = repo
+		} else {
+			return nil, err
+		}
+	}
+	// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
+	// from being ambiguous with values of PolicyConfigurationIdentity.
+	if strings.Contains(resolved, ":") {
+		return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
+	}
+
+	return ostreeReference{
+		image:      image,
+		branchName: encodeOStreeRef(image),
+		repo:       resolved,
+	}, nil
+}
+
+func (ref ostreeReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref ostreeReference) StringWithinTransport() string {
+	return fmt.Sprintf("%s@%s", ref.image, ref.repo)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ostreeReference) DockerReference() reference.Named {
+	return nil
+}
+
+func (ref ostreeReference) PolicyConfigurationIdentity() string {
+	return fmt.Sprintf("%s:%s", ref.repo, ref.image)
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
+	s := strings.SplitN(ref.image, ":", 2)
+	if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
+		panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
+	}
+	name := s[0]
+	res := []string{}
+	for {
+		res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))
+
+		lastSlash := strings.LastIndex(name, "/")
+		if lastSlash == -1 {
+			break
+		}
+		name = name[:lastSlash]
+	}
+	return res
+}
+
+func (s *ostreeImageCloser) Size() (int64, error) {
+	return s.size, nil
+}
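A worked example of the search chain PolicyConfigurationNamespaces above yields (the image and repo values are hypothetical):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // Mirrors the loop in PolicyConfigurationNamespaces above.
    func namespaces(repo, image string) []string {
    	name := strings.SplitN(image, ":", 2)[0]
    	res := []string{}
    	for {
    		res = append(res, fmt.Sprintf("%s:%s", repo, name))
    		lastSlash := strings.LastIndex(name, "/")
    		if lastSlash == -1 {
    			break
    		}
    		name = name[:lastSlash]
    	}
    	return res
    }

    func main() {
    	fmt.Println(namespaces("/ostree/repo", "example.com/team/app:latest"))
    	// [/ostree/repo:example.com/team/app /ostree/repo:example.com/team /ostree/repo:example.com]
    }

Each element is a prefix of the previous one, matching the contract stated in the comment above.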
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	var tmpDir string
+	if sys == nil || sys.OSTreeTmpDirPath == "" {
+		tmpDir = os.TempDir()
+	} else {
+		tmpDir = sys.OSTreeTmpDirPath
+	}
+	src, err := newImageSource(tmpDir, ref)
+	if err != nil {
+		return nil, err
+	}
+	return image.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	var tmpDir string
+	if sys == nil || sys.OSTreeTmpDirPath == "" {
+		tmpDir = os.TempDir()
+	} else {
+		tmpDir = sys.OSTreeTmpDirPath
+	}
+	return newImageSource(tmpDir, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	var tmpDir string
+	if sys == nil || sys.OSTreeTmpDirPath == "" {
+		tmpDir = os.TempDir()
+	} else {
+		tmpDir = sys.OSTreeTmpDirPath
+	}
+	return newImageDestination(ref, tmpDir)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for ostree: images")
+}
+
+var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)
+
+func encodeOStreeRef(in string) string {
+	var buffer bytes.Buffer
+	for i := range in {
+		sub := in[i : i+1]
+		if ostreeRefRegexp.MatchString(sub) {
+			buffer.WriteString(sub)
+		} else {
+			buffer.WriteString(fmt.Sprintf("_%02X", sub[0]))
+		}
+
+	}
+	return buffer.String()
+}
+
+// manifestPath returns a path for the manifest within an ostree repository using our conventions.
+func (ref ostreeReference) manifestPath() string {
+	return filepath.Join("manifest", "manifest.json")
+}
+
+// signaturePath returns a path for a signature within an ostree repository using our conventions.
+func (ref ostreeReference) signaturePath(index int) string {
+	return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1))
+}
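For illustration, encodeOStreeRef above escapes every byte outside [A-Za-z0-9.-] as _XX so that branch names remain valid OSTree refs. A standalone sketch of the same logic:

    package main

    import (
    	"bytes"
    	"fmt"
    	"regexp"
    )

    var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)

    // Same logic as encodeOStreeRef above.
    func encode(in string) string {
    	var buf bytes.Buffer
    	for i := range in {
    		sub := in[i : i+1]
    		if ostreeRefRegexp.MatchString(sub) {
    			buf.WriteString(sub)
    		} else {
    			fmt.Fprintf(&buf, "_%02X", sub[0])
    		}
    	}
    	return buf.String()
    }

    func main() {
    	fmt.Println(encode("example.com/app:latest")) // example.com_2Fapp_3Alatest
    }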
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go
new file mode 100644
index 000000000..9c9a17a58
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go
@@ -0,0 +1,332 @@
+// Package boltdb implements a BlobInfoCache backed by BoltDB.
+package boltdb
+
+import (
+	"fmt"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+	"github.com/containers/image/v5/types"
+	bolt "github.com/etcd-io/bbolt"
+	"github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+	// we can simply start over with a different filename; update blobInfoCacheFilename.
+
+	// FIXME: For CRI-O, does this need to hide information between different users?
+
+	// uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
+	uncompressedDigestBucket = []byte("uncompressedDigest")
+	// digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
+	// (as a set of key=digest, value="" pairs)
+	digestByUncompressedBucket = []byte("digestByUncompressed")
+	// knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
+	// a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
+	knownLocationsBucket = []byte("knownLocations")
+)
+
+// Concurrency:
+// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
+// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
+
+// pathLock contains a lock for a specific BoltDB database path.
+type pathLock struct {
+	refCount int64      // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
+	mutex    sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
+}
+
+var (
+	// pathLocks contains a lock for each currently open file.
+	// This must be global so that independently created instances of boltDBCache exclude each other.
+	// The map is protected by pathLocksMutex.
+	// FIXME? Should this be based on device:inode numbers instead of paths?
+	pathLocks      = map[string]*pathLock{}
+	pathLocksMutex = sync.Mutex{}
+)
+
+// lockPath obtains the pathLock for path.
+// The caller must call unlockPath eventually.
+func lockPath(path string) {
+	pl := func() *pathLock { // A scope for defer
+		pathLocksMutex.Lock()
+		defer pathLocksMutex.Unlock()
+		pl, ok := pathLocks[path]
+		if ok {
+			pl.refCount++
+		} else {
+			pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
+			pathLocks[path] = pl
+		}
+		return pl
+	}()
+	pl.mutex.Lock()
+}
+
+// unlockPath releases the pathLock for path.
+func unlockPath(path string) {
+	pathLocksMutex.Lock()
+	defer pathLocksMutex.Unlock()
+	pl, ok := pathLocks[path]
+	if !ok {
+		// Should this return an error instead? BlobInfoCache ultimately ignores errors…
+		panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
+	}
+	pl.mutex.Unlock()
+	pl.refCount--
+	if pl.refCount == 0 {
+		delete(pathLocks, path)
+	}
+}
+
+// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
+//
+// Note that we don’t keep the database open across operations, because that would lock the file and block any other
+// users; instead, we need to open/close it for every single write or lookup.
+type cache struct {
+	path string
+}
+
+// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
+//
+// Most users should call blobinfocache.DefaultCache instead.
+func New(path string) types.BlobInfoCache {
+	return &cache{path: path}
+}
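lockPath/unlockPath above form a reference-counted registry of per-file mutexes; an entry is dropped from the map once the last user releases it. A usage sketch of the pattern, as view and update below apply it:

    // Sketch only: serialize all access to one BoltDB path across goroutines.
    func withPathLock(path string, fn func() error) error {
    	lockPath(path)
    	defer unlockPath(path)
    	return fn()
    }

The refCount bookkeeping is what makes deletion safe: a goroutine blocked in lockPath keeps the entry alive, so the mutex it will eventually acquire is the same one the current holder releases.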
+
+// view runs the specified fn within a read-only transaction on the database.
+func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
+	// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
+	// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
+	// a read lock, blocking any future writes.
+	// Hence this preliminary check, which is RACY: Another process could remove the file
+	// between the Lstat call and opening the database.
+	if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
+		return err
+	}
+
+	lockPath(bdc.path)
+	defer unlockPath(bdc.path)
+	db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := db.Close(); retErr == nil && err != nil {
+			retErr = err
+		}
+	}()
+
+	return db.View(fn)
+}
+
+// update runs the specified fn within a read-write transaction on the database.
+func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
+	lockPath(bdc.path)
+	defer unlockPath(bdc.path)
+	db, err := bolt.Open(bdc.path, 0600, nil)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := db.Close(); retErr == nil && err != nil {
+			retErr = err
+		}
+	}()
+
+	return db.Update(fn)
+}
+
+// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
+func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
+	if b := tx.Bucket(uncompressedDigestBucket); b != nil {
+		if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
+			d, err := digest.Parse(string(uncompressedBytes))
+			if err == nil {
+				return d
+			}
+			// FIXME? Log err (but throttle the log volume on repeated accesses)?
+		}
+	}
+	// Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	if b := tx.Bucket(digestByUncompressedBucket); b != nil {
+		if b = b.Bucket([]byte(anyDigest.String())); b != nil {
+			c := b.Cursor()
+			if k, _ := c.First(); k != nil { // The bucket is non-empty
+				return anyDigest
+			}
+		}
+	}
+	return ""
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	var res digest.Digest
+	if err := bdc.view(func(tx *bolt.Tx) error {
+		res = bdc.uncompressedDigest(tx, anyDigest)
+		return nil
+	}); err != nil { // Including os.IsNotExist(err)
+		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	_ = bdc.update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
+		if err != nil {
+			return err
+		}
+		key := []byte(anyDigest.String())
+		if previousBytes := b.Get(key); previousBytes != nil {
+			previous, err := digest.Parse(string(previousBytes))
+			if err != nil {
+				return err
+			}
+			if previous != uncompressed {
+				logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+			}
+		}
+		if err := b.Put(key, []byte(uncompressed.String())); err != nil {
+			return err
+		}
+
+		b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
+		if err != nil {
+			return err
+		}
+		b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
+		if err != nil {
+			return err
+		}
+		if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
+			return err
+		}
+		return nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+	_ = bdc.update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
+		if err != nil {
+			return err
+		}
+		b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
+		if err != nil {
+			return err
+		}
+		b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
+		if err != nil {
+			return err
+		}
+		b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
+		if err != nil {
+			return err
+		}
+		value, err := time.Now().MarshalBinary()
+		if err != nil {
+			return err
+		}
+		if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
+			return err
+		}
+		return nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
+func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
+	b := scopeBucket.Bucket([]byte(digest.String()))
+	if b == nil {
+		return candidates
+	}
+	_ = b.ForEach(func(k, v []byte) error {
+		t := time.Time{}
+		if err := t.UnmarshalBinary(v); err != nil {
+			return err
+		}
+		candidates = append(candidates, prioritize.CandidateWithTime{
+			Candidate: types.BICReplacementCandidate{
+				Digest:   digest,
+				Location: types.BICLocationReference{Opaque: string(k)},
+			},
+			LastSeen: t,
+		})
+		return nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+	return candidates
+}
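Putting the pieces together, a typical round trip against this cache looks like the sketch below. The digests, scope, and cache path are hypothetical; the blank docker import registers that transport so transports.Get can find it, and errors are ignored because the BlobInfoCache interface is best-effort:

    package main

    import (
    	"fmt"

    	_ "github.com/containers/image/v5/docker" // registers the "docker" transport
    	"github.com/containers/image/v5/pkg/blobinfocache/boltdb"
    	"github.com/containers/image/v5/transports"
    	"github.com/containers/image/v5/types"
    	"github.com/opencontainers/go-digest"
    )

    func main() {
    	cache := boltdb.New("/tmp/blob-info-cache-v1.boltdb") // hypothetical path

    	compressed := digest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111")
    	uncompressed := digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222")
    	transport := transports.Get("docker")
    	scope := types.BICTransportScope{Opaque: "registry.example.com"}

    	// Record what we learned while copying a blob...
    	cache.RecordDigestUncompressedPair(compressed, uncompressed)
    	cache.RecordKnownLocation(transport, scope, compressed, types.BICLocationReference{Opaque: "library/app"})

    	// ...and ask for reuse candidates on a later copy.
    	for _, c := range cache.CandidateLocations(transport, scope, compressed, true) {
    		fmt.Println(c.Digest, c.Location.Opaque)
    	}
    }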
+// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	res := []prioritize.CandidateWithTime{}
+	var uncompressedDigestValue digest.Digest // = ""
+	if err := bdc.view(func(tx *bolt.Tx) error {
+		scopeBucket := tx.Bucket(knownLocationsBucket)
+		if scopeBucket == nil {
+			return nil
+		}
+		scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
+		if scopeBucket == nil {
+			return nil
+		}
+		scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
+		if scopeBucket == nil {
+			return nil
+		}
+
+		res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
+		if canSubstitute {
+			if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
+				b := tx.Bucket(digestByUncompressedBucket)
+				if b != nil {
+					b = b.Bucket([]byte(uncompressedDigestValue.String()))
+					if b != nil {
+						if err := b.ForEach(func(k, _ []byte) error {
+							d, err := digest.Parse(string(k))
+							if err != nil {
+								return err
+							}
+							if d != primaryDigest && d != uncompressedDigestValue {
+								res = bdc.appendReplacementCandidates(res, scopeBucket, d)
+							}
+							return nil
+						}); err != nil {
+							return err
+						}
+					}
+				}
+				if uncompressedDigestValue != primaryDigest {
+					res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
+				}
+			}
+		}
+		return nil
+	}); err != nil { // Including os.IsNotExist(err)
+		return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+
+	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
new file mode 100644
index 000000000..952bcf5a1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
@@ -0,0 +1,75 @@
+package blobinfocache
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+
+	"github.com/containers/image/v5/pkg/blobinfocache/boltdb"
+	"github.com/containers/image/v5/pkg/blobinfocache/memory"
+	"github.com/containers/image/v5/types"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// blobInfoCacheFilename is the file name used for blob info caches.
+	// If the format changes in an incompatible way, increase the version number.
+	blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
+	// systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
+	systemBlobInfoCacheDir = "/var/lib/containers/cache"
+)
+
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
+// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory.
+func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
+	if sys != nil && sys.BlobInfoCacheDir != "" {
+		return sys.BlobInfoCacheDir, nil
+	}
+
+	// FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
+	// and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
+	if euid == 0 {
+		if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
+		}
+		return systemBlobInfoCacheDir, nil
+	}
+
+	// This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
+	dataDir := os.Getenv("XDG_DATA_HOME")
+	if dataDir == "" {
+		home := os.Getenv("HOME")
+		if home == "" {
+			return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME is set to a non-empty value")
+		}
+		dataDir = filepath.Join(home, ".local", "share")
+	}
+	return filepath.Join(dataDir, "containers", "cache"), nil
+}
+
+func getRootlessUID() int {
+	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
+	if uidEnv != "" {
+		u, _ := strconv.Atoi(uidEnv)
+		return u
+	}
+	return os.Geteuid()
+}
+
+// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
+	dir, err := blobInfoCacheDir(sys, getRootlessUID())
+	if err != nil {
+		logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
+		return memory.New()
+	}
+	path := filepath.Join(dir, blobInfoCacheFilename)
+	if err := os.MkdirAll(dir, 0700); err != nil {
+		logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
+		return memory.New()
+	}
+
+	logrus.Debugf("Using blob info cache at %s", path)
+	return boltdb.New(path)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
new file mode 100644
index 000000000..5deca4a82
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -0,0 +1,110 @@
+// Package prioritize provides utilities for prioritizing locations in
+// types.BlobInfoCache.CandidateLocations.
+package prioritize
+
+import (
+	"sort"
+	"time"
+
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+)
+
+// replacementAttempts is the number of blob replacement candidates returned by DestructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
+// This is a heuristic/guess, and could well use a different value.
+const replacementAttempts = 5
+
+// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
+type CandidateWithTime struct {
+	Candidate types.BICReplacementCandidate // The replacement candidate
+	LastSeen  time.Time                     // Time the candidate was last known to exist (either read or written)
+}
+
+// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
+// along with the specially-treated digest values for the implementation of sort.Interface.Less
+type candidateSortState struct {
+	cs                 []CandidateWithTime // The entries to sort
+	primaryDigest      digest.Digest       // The digest the user actually asked for
+	uncompressedDigest digest.Digest       // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
+}
+
+func (css *candidateSortState) Len() int {
+	return len(css.cs)
+}
+
+func (css *candidateSortState) Less(i, j int) bool {
+	xi := css.cs[i]
+	xj := css.cs[j]
+
+	// primaryDigest entries come first, more recent first.
+	// uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
+	// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
+
+	// First, deal with the primaryDigest/uncompressedDigest cases:
+	if xi.Candidate.Digest != xj.Candidate.Digest {
+		// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
+		if xi.Candidate.Digest == css.primaryDigest {
+			return true
+		}
+		if xj.Candidate.Digest == css.primaryDigest {
+			return false
+		}
+		if css.uncompressedDigest != "" {
+			if xi.Candidate.Digest == css.uncompressedDigest {
+				return false
+			}
+			if xj.Candidate.Digest == css.uncompressedDigest {
+				return true
+			}
+		}
+	} else { // xi.Candidate.Digest == xj.Candidate.Digest
+		// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
+		if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
+			return xi.LastSeen.After(xj.LastSeen)
+		}
+	}
+
+	// Neither of the digests are primaryDigest/uncompressedDigest:
+	if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
+		return xi.LastSeen.After(xj.LastSeen)
+	}
+	// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
+	return xi.Candidate.Digest < xj.Candidate.Digest
+}
+
+func (css *candidateSortState) Swap(i, j int) {
+	css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
+}
+
+// destructivelyPrioritizeReplacementCandidatesWithMax is DestructivelyPrioritizeReplacementCandidates with an additional
+// parameter limiting the number of returned entries, only to make testing simpler.
+func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
+	// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
+	// compare equal.
+	sort.Sort(&candidateSortState{
+		cs:                 cs,
+		primaryDigest:      primaryDigest,
+		uncompressedDigest: uncompressedDigest,
+	})
+
+	resLength := len(cs)
+	if resLength > maxCandidates {
+		resLength = maxCandidates
+	}
+	res := make([]types.BICReplacementCandidate, resLength)
+	for i := range res {
+		res[i] = cs[i].Candidate
+	}
+	return res
+}
+
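A test-style sketch of the ordering contract implemented by Less: the primary digest sorts first regardless of age, unrelated digests sort by recency, and the uncompressed digest sorts last. The digest strings are made up, and the test would have to live in this prioritize package:

package prioritize

import (
	"testing"
	"time"

	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

func TestOrderingSketch(t *testing.T) {
	primary := digest.Digest("sha256:1111")      // illustrative values only
	uncompressed := digest.Digest("sha256:2222")
	other := digest.Digest("sha256:3333")
	now := time.Now()
	cs := []CandidateWithTime{
		{Candidate: types.BICReplacementCandidate{Digest: other, Location: types.BICLocationReference{Opaque: "o"}}, LastSeen: now},
		{Candidate: types.BICReplacementCandidate{Digest: uncompressed, Location: types.BICLocationReference{Opaque: "u"}}, LastSeen: now},
		{Candidate: types.BICReplacementCandidate{Digest: primary, Location: types.BICLocationReference{Opaque: "p"}}, LastSeen: now.Add(-time.Hour)},
	}
	res := DestructivelyPrioritizeReplacementCandidates(cs, primary, uncompressed)
	// Expected order: primary (despite being the oldest), then other, then uncompressed.
	if res[0].Digest != primary || res[2].Digest != uncompressed {
		t.Fatalf("unexpected order: %v", res)
	}
}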
+// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
+// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+//
+// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
+// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
+	return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
new file mode 100644
index 000000000..8f28c6623
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
@@ -0,0 +1,145 @@
+// Package memory implements an in-memory BlobInfoCache.
+package memory
+
+import (
+	"sync"
+	"time"
+
+	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+// locationKey only exists to make lookup in knownLocations easier.
+type locationKey struct {
+	transport  string
+	scope      types.BICTransportScope
+	blobDigest digest.Digest
+}
+
+// cache implements an in-memory-only BlobInfoCache
+type cache struct {
+	mutex sync.Mutex
+	// The following fields can only be accessed with mutex held.
+	uncompressedDigests   map[digest.Digest]digest.Digest
+	digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{}             // stores a set of digests for each uncompressed digest
+	knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+}
+
+// New returns a BlobInfoCache implementation which is in-memory only.
+//
+// This is primarily intended for tests, but also used as a fallback
+// if blobinfocache.DefaultCache can’t determine, or set up, the
+// location for a persistent cache. Most users should use
+// blobinfocache.DefaultCache instead of calling this directly.
+// Manual users of types.{ImageSource,ImageDestination} might also use
+// this instead of a persistent cache.
+func New() types.BlobInfoCache {
+	return &cache{
+		uncompressedDigests:   map[digest.Digest]digest.Digest{},
+		digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
+		knownLocations:        map[locationKey]map[types.BICLocationReference]time.Time{},
+	}
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	return mem.uncompressedDigestLocked(anyDigest)
+}
+
+// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
+func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
+	if d, ok := mem.uncompressedDigests[anyDigest]; ok {
+		return d
+	}
+	// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
+		return anyDigest
+	}
+	return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
+		logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+	}
+	mem.uncompressedDigests[anyDigest] = uncompressed
+
+	anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
+	if !ok {
+		anyDigestSet = map[digest.Digest]struct{}{}
+		mem.digestsByUncompressed[uncompressed] = anyDigestSet
+	}
+	anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
+	locationScope, ok := mem.knownLocations[key]
+	if !ok {
+		locationScope = map[types.BICLocationReference]time.Time{}
+		mem.knownLocations[key] = locationScope
+	}
+	locationScope[location] = time.Now() // Possibly overwriting an older entry.
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime {
+	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
+	for l, t := range locations {
+		candidates = append(candidates, prioritize.CandidateWithTime{
+			Candidate: types.BICReplacementCandidate{
+				Digest:   digest,
+				Location: l,
+			},
+			LastSeen: t,
+		})
+	}
+	return candidates
+}
+
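Before the CandidateLocations implementation below, a quick sketch of how a caller might exercise this in-memory cache, for example in a test. The digest values are illustrative only:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/blobinfocache/memory"
	"github.com/opencontainers/go-digest"
)

func main() {
	cache := memory.New()
	compressed := digest.Digest("sha256:1111")   // made-up digests
	uncompressed := digest.Digest("sha256:2222")

	// Before recording anything, the cache knows nothing about the digest.
	fmt.Println(cache.UncompressedDigest(compressed)) // ""

	// Record a locally verified (compressed, uncompressed) pair ...
	cache.RecordDigestUncompressedPair(compressed, uncompressed)
	// ... and both lookup directions described above start working:
	fmt.Println(cache.UncompressedDigest(compressed))   // sha256:2222
	fmt.Println(cache.UncompressedDigest(uncompressed)) // sha256:2222 (known uncompressed via the reverse mapping)
}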
+// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	res := []prioritize.CandidateWithTime{}
+	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
+	var uncompressedDigest digest.Digest // = ""
+	if canSubstitute {
+		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
+			otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+			for d := range otherDigests {
+				if d != primaryDigest && d != uncompressedDigest {
+					res = mem.appendReplacementCandidates(res, transport, scope, d)
+				}
+			}
+			if uncompressedDigest != primaryDigest {
+				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
+			}
+		}
+	}
+	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
new file mode 100644
index 000000000..fa1879afd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
@@ -0,0 +1,49 @@
+// Package none implements a dummy BlobInfoCache which records no data.
+package none
+
+import (
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+)
+
+// noCache implements a dummy BlobInfoCache which records no data.
+type noCache struct {
+}
+
+// NoCache implements BlobInfoCache by not recording any data.
+//
+// This exists primarily for implementations of configGetter for
+// Manifest.Inspect, because configs only have one representation.
+// Any use of BlobInfoCache with blobs should usually use at least a
+// short-lived cache, ideally blobinfocache.DefaultCache.
+var NoCache types.BlobInfoCache = noCache{}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+}
+
+// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
new file mode 100644
index 000000000..04d231c6d
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
@@ -0,0 +1,149 @@
+package compression
+
+import (
+	"bytes"
+	"compress/bzip2"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"github.com/containers/image/v5/pkg/compression/internal"
+	"github.com/containers/image/v5/pkg/compression/types"
+	"github.com/klauspost/pgzip"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/ulikunitz/xz"
+)
+
+// Algorithm is a compression algorithm that can be used for CompressStream.
+type Algorithm = types.Algorithm
+
+var (
+	// Gzip compression.
+	Gzip = internal.NewAlgorithm("gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
+	// Bzip2 compression.
+	Bzip2 = internal.NewAlgorithm("bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
+	// Xz compression. (The name is lowercase so that AlgorithmByName("xz") can find it, matching the other entries.)
+	Xz = internal.NewAlgorithm("xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
+	// Zstd compression.
+	Zstd = internal.NewAlgorithm("zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
+
+	compressionAlgorithms = map[string]Algorithm{
+		Gzip.Name():  Gzip,
+		Bzip2.Name(): Bzip2,
+		Xz.Name():    Xz,
+		Zstd.Name():  Zstd,
+	}
+)
+
+// AlgorithmByName returns the compressor by its name
+func AlgorithmByName(name string) (Algorithm, error) {
+	algorithm, ok := compressionAlgorithms[name]
+	if ok {
+		return algorithm, nil
+	}
+	return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name)
+}
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc = internal.DecompressorFunc
+
+// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
+func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
+	return pgzip.NewReader(r)
+}
+
+// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
+func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
+	return ioutil.NopCloser(bzip2.NewReader(r)), nil
+}
+
+// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
+func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
+	r, err := xz.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	return ioutil.NopCloser(r), nil
+}
+
+// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
+func gzipCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+	if level != nil {
+		return pgzip.NewWriterLevel(r, *level)
+	}
+	return pgzip.NewWriter(r), nil
+}
+
+// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm.
+func bzip2Compressor(r io.Writer, level *int) (io.WriteCloser, error) {
+	return nil, fmt.Errorf("bzip2 compression not supported")
+}
+
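A hedged round-trip sketch of this package's public surface: compress with AlgorithmByName plus CompressStream (CompressStream and DetectCompression are defined just below), then detect the format from the magic prefix and decompress. Error handling is abbreviated to panics for brevity:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	// Compress some bytes with gzip at the default level (nil level pointer).
	var buf bytes.Buffer
	algo, err := compression.AlgorithmByName("gzip")
	if err != nil {
		panic(err)
	}
	w, err := compression.CompressStream(&buf, algo, nil)
	if err != nil {
		panic(err)
	}
	_, _ = w.Write([]byte("hello"))
	_ = w.Close() // flushes the gzip trailer

	// Detect the format from the magic prefix and decompress.
	decompressor, r, err := compression.DetectCompression(&buf)
	if err != nil || decompressor == nil {
		panic("expected to detect gzip")
	}
	rc, err := decompressor(r)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	data, _ := ioutil.ReadAll(rc)
	fmt.Printf("%s\n", data) // hello
}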
+// xzCompressor is a CompressorFunc for the xz compression algorithm.
+func xzCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+	return xz.NewWriter(r)
+}
+
+// CompressStream returns a stream that will write data compressed with the specified algorithm, at the specified level, to dest.
+func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
+	return internal.AlgorithmCompressor(algo)(dest, level)
+}
+
+// DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an empty Algorithm and nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) {
+	buffer := [8]byte{}
+
+	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
+		// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
+		return Algorithm{}, nil, nil, err
+	}
+
+	var retAlgo Algorithm
+	var decompressor DecompressorFunc
+	for _, algo := range compressionAlgorithms {
+		if bytes.HasPrefix(buffer[:n], internal.AlgorithmPrefix(algo)) {
+			logrus.Debugf("Detected compression format %s", algo.Name())
+			retAlgo = algo
+			decompressor = internal.AlgorithmDecompressor(algo)
+			break
+		}
+	}
+	if decompressor == nil {
+		logrus.Debugf("No compression detected")
+	}
+
+	return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+}
+
+// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
+	_, d, r, e := DetectCompressionFormat(input)
+	return d, r, e
+}
+
+// AutoDecompress takes a stream and returns an uncompressed version of the
+// same stream.
+// The caller must call Close() on the returned stream (even if the input does not need,
+// or does not even support, closing!).
+func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
+	decompressor, stream, err := DetectCompression(stream)
+	if err != nil {
+		return nil, false, errors.Wrapf(err, "Error detecting compression")
+	}
+	var res io.ReadCloser
+	if decompressor != nil {
+		res, err = decompressor(stream)
+		if err != nil {
+			return nil, false, errors.Wrapf(err, "Error initializing decompression")
+		}
+	} else {
+		res = ioutil.NopCloser(stream)
+	}
+	return res, decompressor != nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
new file mode 100644
index 000000000..6092a9517
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
@@ -0,0 +1,57 @@
+package internal
+
+import "io"
+
+// CompressorFunc writes the compressed stream to the given writer using the specified compression level.
+// The caller must call Close() on the stream (even if the input stream does not need closing!).
+type CompressorFunc func(io.Writer, *int) (io.WriteCloser, error)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
+
+// Algorithm is a compression algorithm that can be used for CompressStream.
+type Algorithm struct {
+	name         string
+	prefix       []byte
+	decompressor DecompressorFunc
+	compressor   CompressorFunc
+}
+
+// NewAlgorithm creates an Algorithm instance.
+// This function exists so that Algorithm instances can only be created by code that
+// is allowed to import this internal subpackage.
+func NewAlgorithm(name string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
+	return Algorithm{
+		name:         name,
+		prefix:       prefix,
+		decompressor: decompressor,
+		compressor:   compressor,
+	}
+}
+
+// Name returns the name for the compression algorithm.
+func (c Algorithm) Name() string {
+	return c.name
+}
+
+// AlgorithmCompressor returns the compressor field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmCompressor(algo Algorithm) CompressorFunc {
+	return algo.compressor
+}
+
+// AlgorithmDecompressor returns the decompressor field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmDecompressor(algo Algorithm) DecompressorFunc {
+	return algo.decompressor
+}
+
+// AlgorithmPrefix returns the prefix field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmPrefix(algo Algorithm) []byte {
+	return algo.prefix
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
new file mode 100644
index 000000000..f96eff2e3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
@@ -0,0 +1,13 @@
+package types
+
+import (
+	"github.com/containers/image/v5/pkg/compression/internal"
+)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc = internal.DecompressorFunc
+
+// Algorithm is a compression algorithm provided and supported by pkg/compression.
+// It can’t be supplied from the outside.
+type Algorithm = internal.Algorithm
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/zstd.go b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go
new file mode 100644
index 000000000..962fe9676
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go
@@ -0,0 +1,59 @@
+package compression
+
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+type wrapperZstdDecoder struct {
+	decoder *zstd.Decoder
+}
+
+func (w *wrapperZstdDecoder) Close() error {
+	w.decoder.Close()
+	return nil
+}
+
+func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) {
+	return w.decoder.DecodeAll(input, dst)
+}
+
+func (w *wrapperZstdDecoder) Read(p []byte) (int, error) {
+	return w.decoder.Read(p)
+}
+
+func (w *wrapperZstdDecoder) Reset(r io.Reader) error {
+	return w.decoder.Reset(r)
+}
+
+func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) {
+	return w.decoder.WriteTo(wr)
+}
+
+func zstdReader(buf io.Reader) (io.ReadCloser, error) {
+	decoder, err := zstd.NewReader(buf)
+	return &wrapperZstdDecoder{decoder: decoder}, err
+}
+
+func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
+	return zstd.NewWriter(dest)
+}
+
+func zstdWriterWithLevel(dest io.Writer, level int) (io.WriteCloser, error) {
+	el := zstd.EncoderLevelFromZstd(level)
+	return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
+}
+
+// zstdCompressor is a CompressorFunc for the zstd compression algorithm.
+func zstdCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+	if level == nil {
+		return zstdWriter(r)
+	}
+	return zstdWriterWithLevel(r, *level)
+}
+
+// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm.
+func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) {
+	return zstdReader(r)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
new file mode 100644
index 000000000..b7dddd0d6
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -0,0 +1,363 @@
+package config
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/image/v5/types"
+	helperclient "github.com/docker/docker-credential-helpers/client"
+	"github.com/docker/docker-credential-helpers/credentials"
+	"github.com/docker/docker/pkg/homedir"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type dockerAuthConfig struct {
+	Auth string `json:"auth,omitempty"`
+}
+
+type dockerConfigFile struct {
+	AuthConfigs map[string]dockerAuthConfig `json:"auths"`
+	CredHelpers map[string]string           `json:"credHelpers,omitempty"`
+}
+
+type authPath struct {
+	path         string
+	legacyFormat bool
+}
+
+var (
+	defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")
+	xdgRuntimeDirPath       = filepath.FromSlash("containers/auth.json")
+	dockerHomePath          = filepath.FromSlash(".docker/config.json")
+	dockerLegacyHomePath    = ".dockercfg"
+
+	enableKeyring = false
+
+	// ErrNotLoggedIn is returned for users not logged into a registry
+	// that they are trying to log out of
+	ErrNotLoggedIn = errors.New("not logged in")
+	// ErrNotSupported is returned for unsupported methods
+	ErrNotSupported = errors.New("not supported")
+)
+
+// SetAuthentication stores the username and password in the auth.json file
+func SetAuthentication(sys *types.SystemContext, registry, username, password string) error {
+	return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+		if ch, exists := auths.CredHelpers[registry]; exists {
+			return false, setAuthToCredHelper(ch, registry, username, password)
+		}
+
+		// Set the credentials to kernel keyring if enableKeyring is true.
+		// The keyring might not work in all environments (e.g., missing capability) and isn't supported on all platforms.
+		// Hence, we want to fall back to using the authfile in case the keyring failed.
+		// However, if enableKeyring is false, we want to adhere to the user specification and not use the keyring.
+		if enableKeyring {
+			err := setAuthToKernelKeyring(registry, username, password)
+			if err == nil {
+				logrus.Debugf("credentials for (%s, %s) were stored in the kernel keyring\n", registry, username)
+				return false, nil
+			}
+			logrus.Debugf("failed to authenticate with the kernel keyring, falling back to authfiles. %v", err)
+		}
+		creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
+		newCreds := dockerAuthConfig{Auth: creds}
+		auths.AuthConfigs[registry] = newCreds
+		return true, nil
+	})
+}
+
+// GetAuthentication returns the registry credentials stored in
+// either auth.json file or .docker/config.json
+// If an entry is not found, empty strings are returned for the username and password
+func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
+	if sys != nil && sys.DockerAuthConfig != nil {
+		logrus.Debug("Returning credentials from DockerAuthConfig")
+		return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil
+	}
+
+	if enableKeyring {
+		username, password, err := getAuthFromKernelKeyring(registry)
+		if err == nil {
+			logrus.Debug("returning credentials from kernel keyring")
+			return username, password, nil
+		}
+	}
+
+	paths := []authPath{}
+	pathToAuth, lf, err := getPathToAuth(sys)
+	if err == nil {
+		paths = append(paths, authPath{path: pathToAuth, legacyFormat: lf})
+	} else {
+		// Error means that the path set for XDG_RUNTIME_DIR does not exist
+		// but we don't want to completely fail in the case that the user is pulling a public image
+		// Logging the error as a warning instead and moving on to pulling the image
+		logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
+	}
+	paths = append(paths,
+		authPath{path: filepath.Join(homedir.Get(), dockerHomePath), legacyFormat: false},
+		authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true})
+
+	for _, path := range paths {
+		username, password, err := findAuthentication(registry, path.path, path.legacyFormat)
+		if err != nil {
+			logrus.Debugf("Credentials not found")
+			return "", "", err
+		}
+		if username != "" && password != "" {
+			logrus.Debugf("Returning credentials from %s", path.path)
+			return username, password, nil
+		}
+	}
+	logrus.Debugf("Credentials not found")
+	return "", "", nil
+}
+
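What GetAuthentication and SetAuthentication read and write for a plain auths entry is just base64("username:password"). A standalone sketch with made-up credentials; the decoding half mirrors decodeDockerAuth, defined later in this file:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// SetAuthentication stores base64("username:password") in the entry's "auth" field:
	auth := base64.StdEncoding.EncodeToString([]byte("alice:s3cret")) // hypothetical credentials
	fmt.Println(auth) // YWxpY2U6czNjcmV0

	// Reading it back reverses the encoding, splitting on the first ":" only,
	// so passwords may themselves contain colons:
	decoded, _ := base64.StdEncoding.DecodeString(auth)
	parts := strings.SplitN(string(decoded), ":", 2)
	fmt.Println(parts[0], parts[1]) // alice s3cret
}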
+// RemoveAuthentication deletes the credentials stored in auth.json
+func RemoveAuthentication(sys *types.SystemContext, registry string) error {
+	return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+		// First try cred helpers.
+		if ch, exists := auths.CredHelpers[registry]; exists {
+			return false, deleteAuthFromCredHelper(ch, registry)
+		}
+
+		// Next if keyring is enabled try kernel keyring
+		if enableKeyring {
+			err := deleteAuthFromKernelKeyring(registry)
+			if err == nil {
+				logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry)
+				return false, nil
+			}
+			logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles")
+		}
+
+		if _, ok := auths.AuthConfigs[registry]; ok {
+			delete(auths.AuthConfigs, registry)
+		} else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok {
+			delete(auths.AuthConfigs, normalizeRegistry(registry))
+		} else {
+			return false, ErrNotLoggedIn
+		}
+		return true, nil
+	})
+}
+
+// RemoveAllAuthentication deletes all the credentials stored in auth.json and kernel keyring
+func RemoveAllAuthentication(sys *types.SystemContext) error {
+	return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+		if enableKeyring {
+			err := removeAllAuthFromKernelKeyring()
+			if err == nil {
+				logrus.Debugf("removing all credentials from kernel keyring")
+				return false, nil
+			}
+			logrus.Debugf("error removing credentials from kernel keyring")
+		}
+		auths.CredHelpers = make(map[string]string)
+		auths.AuthConfigs = make(map[string]dockerAuthConfig)
+		return true, nil
+	})
+}
+
+// getPathToAuth gets the path of the auth.json file
+// The path can be overridden by the user if the overwrite-path flag is set
+// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers
+// Otherwise, the auth.json file is stored in /run/containers/UID
+func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
+	if sys != nil {
+		if sys.AuthFilePath != "" {
+			return sys.AuthFilePath, false, nil
+		}
+		if sys.LegacyFormatAuthFilePath != "" {
+			return sys.LegacyFormatAuthFilePath, true, nil
+		}
+		if sys.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
+		}
+	}
+
+	runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
+	if runtimeDir != "" {
+		// This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway.
+		// We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case.
+		_, err := os.Stat(runtimeDir)
+		if os.IsNotExist(err) {
+			// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
+			// or made a typo while setting the environment variable,
+			// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
+			return "", false, errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
+		} // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
+		return filepath.Join(runtimeDir, xdgRuntimeDirPath), false, nil
+	}
+	return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), false, nil
+}
+
+// readJSONFile unmarshals the authentications stored in the auth.json file and returns them,
+// or an empty dockerConfigFile data structure if auth.json does not exist.
+// If the file exists and is empty, readJSONFile returns an error.
+func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
+	var auths dockerConfigFile
+
+	raw, err := ioutil.ReadFile(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			auths.AuthConfigs = map[string]dockerAuthConfig{}
+			return auths, nil
+		}
+		return dockerConfigFile{}, err
+	}
+
+	if legacyFormat {
+		if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
+			return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
+		}
+		return auths, nil
+	}
+
+	if err = json.Unmarshal(raw, &auths); err != nil {
+		return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
+	}
+
+	return auths, nil
+}
+
+// modifyJSON writes to auth.json if the dockerConfigFile has been updated
+func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error {
+	path, legacyFormat, err := getPathToAuth(sys)
+	if err != nil {
+		return err
+	}
+
+	dir := filepath.Dir(path)
+	if _, err := os.Stat(dir); os.IsNotExist(err) {
+		if err = os.MkdirAll(dir, 0700); err != nil {
+			return errors.Wrapf(err, "error creating directory %q", dir)
+		}
+	}
+
+	if legacyFormat {
+		return fmt.Errorf("writes to %s using legacy format are not supported", path)
+	}
+	auths, err := readJSONFile(path, false)
+	if err != nil {
+		return errors.Wrapf(err, "error reading JSON file %q", path)
+	}
+
+	updated, err := editor(&auths)
+	if err != nil {
+		return errors.Wrapf(err, "error updating %q", path)
+	}
+	if updated {
+		newData, err := json.MarshalIndent(auths, "", "\t")
+		if err != nil {
+			return errors.Wrapf(err, "error marshaling JSON %q", path)
+		}
+
+		if err = ioutil.WriteFile(path, newData, 0600); err != nil { // auth.json contains credentials, so keep it private to the owner
+			return errors.Wrapf(err, "error writing to file %q", path)
+		}
+	}
+
+	return nil
+}
+
+func getAuthFromCredHelper(credHelper, registry string) (string, string, error) {
+	helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+	p := helperclient.NewShellProgramFunc(helperName)
+	creds, err := helperclient.Get(p, registry)
+	if err != nil {
+		return "", "", err
+	}
+	return creds.Username, creds.Secret, nil
+}
+
+func setAuthToCredHelper(credHelper, registry, username, password string) error {
+	helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+	p := helperclient.NewShellProgramFunc(helperName)
+	creds := &credentials.Credentials{
+		ServerURL: registry,
+		Username:  username,
+		Secret:    password,
+	}
+	return helperclient.Store(p, creds)
+}
+
+func deleteAuthFromCredHelper(credHelper, registry string) error {
+	helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+	p := helperclient.NewShellProgramFunc(helperName)
+	return helperclient.Erase(p, registry)
+}
+
+// findAuthentication looks for auth of registry in path
+func findAuthentication(registry, path string, legacyFormat bool) (string, string, error) {
+	auths, err := readJSONFile(path, legacyFormat)
+	if err != nil {
+		return "", "", errors.Wrapf(err, "error reading JSON file %q", path)
+	}
+
+	// First try cred helpers. They should always be normalized.
+	if ch, exists := auths.CredHelpers[registry]; exists {
+		return getAuthFromCredHelper(ch, registry)
+	}
+
+	// I'm feeling lucky
+	if val, exists := auths.AuthConfigs[registry]; exists {
+		return decodeDockerAuth(val.Auth)
+	}
+
+	// bad luck; let's normalize the entries first
+	registry = normalizeRegistry(registry)
+	normalizedAuths := map[string]dockerAuthConfig{}
+	for k, v := range auths.AuthConfigs {
+		normalizedAuths[normalizeRegistry(k)] = v
+	}
+	if val, exists := normalizedAuths[registry]; exists {
+		return decodeDockerAuth(val.Auth)
+	}
+	return "", "", nil
+}
+
+func decodeDockerAuth(s string) (string, string, error) {
+	decoded, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		return "", "", err
+	}
+	parts := strings.SplitN(string(decoded), ":", 2)
+	if len(parts) != 2 {
+		// if it's invalid just skip, as docker does
+		return "", "", nil
+	}
+	user := parts[0]
+	password := strings.Trim(parts[1], "\x00")
+	return user, password, nil
+}
+
+// convertToHostname converts a registry URL which has http|https prepended
+// to just a hostname.
+// Copied from github.com/docker/docker/registry/auth.go
+func convertToHostname(url string) string {
+	stripped := url
+	if strings.HasPrefix(url, "http://") {
+		stripped = strings.TrimPrefix(url, "http://")
+	} else if strings.HasPrefix(url, "https://") {
+		stripped = strings.TrimPrefix(url, "https://")
+	}
+
+	nameParts := strings.SplitN(stripped, "/", 2)
+
+	return nameParts[0]
+}
+
+func normalizeRegistry(registry string) string {
+	normalized := convertToHostname(registry)
+	switch normalized {
+	case "registry-1.docker.io", "docker.io":
+		return "index.docker.io"
+	}
+	return normalized
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
new file mode 100644
index 000000000..43f2d5a85
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
@@ -0,0 +1,115 @@
+package config
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v5/internal/pkg/keyctl"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const keyDescribePrefix = "container-registry-login:"
+
+func getAuthFromKernelKeyring(registry string) (string, string, error) {
+	userkeyring, err := keyctl.UserKeyring()
+	if err != nil {
+		return "", "", err
+	}
+	key, err := userkeyring.Search(genDescription(registry))
+	if err != nil {
+		return "", "", err
+	}
+	authData, err := key.Get()
+	if err != nil {
+		return "", "", err
+	}
+	parts := strings.SplitN(string(authData), "\x00", 2)
+	if len(parts) != 2 {
+		return "", "", nil
+	}
+	return parts[0], parts[1], nil
+}
+
+func deleteAuthFromKernelKeyring(registry string) error {
+	userkeyring, err := keyctl.UserKeyring()
+
+	if err != nil {
+		return err
+	}
+	key, err := userkeyring.Search(genDescription(registry))
+	if err != nil {
+		return err
+	}
+	return key.Unlink()
+}
+
+func removeAllAuthFromKernelKeyring() error {
+	keys, err := keyctl.ReadUserKeyring()
+	if err != nil {
+		return err
+	}
+
+	userkeyring, err := keyctl.UserKeyring()
+	if err != nil {
+		return err
+	}
+
+	for _, k := range keys {
+		keyAttr, err := k.Describe()
+		if err != nil {
+			return err
+		}
+		// split string "type;uid;gid;perm;description"
+		keyAttrs := strings.SplitN(keyAttr, ";", 5)
+		if len(keyAttrs) < 5 {
+			return errors.Errorf("Key attributes of %d are not available", k.ID())
+		}
+		keyDescribe := keyAttrs[4]
+		if strings.HasPrefix(keyDescribe, keyDescribePrefix) {
+			err := keyctl.Unlink(userkeyring, k)
+			if err != nil {
+				return errors.Wrapf(err, "error unlinking key %d", k.ID())
+			}
+			logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr)
+		}
+	}
+	return nil
+}
+
+func setAuthToKernelKeyring(registry, username, password string) error {
+	keyring, err := keyctl.SessionKeyring()
+	if err != nil {
+		return err
+	}
+	id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password)))
+	if err != nil {
+		return err
+	}
+
+	// set all permissions (view, read, write, search, link, set attribute) for the current user;
+	// this enables the user to search the key after it is linked to the user keyring and unlinked from the session keyring
+	err = keyctl.SetPerm(id, keyctl.PermUserAll)
+	if err != nil {
+		return err
+	}
+	// link the key to userKeyring
+	userKeyring, err := keyctl.UserKeyring()
+	if err != nil {
+		return errors.Wrapf(err, "error getting user keyring")
+	}
+	err = keyctl.Link(userKeyring, id)
+	if err != nil {
+		return errors.Wrapf(err, "error linking the key to user keyring")
+	}
+	// unlink the key from session keyring
+	err = keyctl.Unlink(keyring, id)
+	if err != nil {
+		return errors.Wrapf(err, "error unlinking the key from session keyring")
+	}
+	return nil
+}
+
+func genDescription(registry string) string {
+	return fmt.Sprintf("%s%s", keyDescribePrefix, registry)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
new file mode 100644
index 000000000..9b0e8bee2
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
@@ -0,0 +1,20 @@
+// +build !linux
+// +build !386 !amd64
+
+package config
+
+func getAuthFromKernelKeyring(registry string) (string, string, error) {
+	return "", "", ErrNotSupported
+}
+
+func deleteAuthFromKernelKeyring(registry string) error {
+	return ErrNotSupported
+}
+
+func setAuthToKernelKeyring(registry, username, password string) error {
+	return ErrNotSupported
+}
+
+func removeAllAuthFromKernelKeyring() error {
+	return ErrNotSupported
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/strslice/README.md b/vendor/github.com/containers/image/v5/pkg/strslice/README.md
new file mode 100644
index 000000000..ae6097e82
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/strslice/README.md
@@ -0,0 +1 @@
+This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice).
diff --git a/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go
new file mode 100644
index 000000000..bad493fb8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+	if len(b) == 0 {
+		// With no input, we preserve the existing value by returning nil and
+		// leaving the target alone. This allows defining default values for
+		// the type.
+		return nil
+	}
+
+	p := make([]string, 0, 1)
+	if err := json.Unmarshal(b, &p); err != nil {
+		var s string
+		if err := json.Unmarshal(b, &s); err != nil {
+			return err
+		}
+		p = append(p, s)
+	}
+
+	*e = p
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
new file mode 100644
index 000000000..ff802cefd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
@@ -0,0 +1,482 @@
+package sysregistriesv2
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"sync"
+
+	"github.com/BurntSushi/toml"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// systemRegistriesConfPath is the path to the system-wide registry
+// configuration file and is used to add/subtract potential registries for
+// obtaining images. You can override this at build time with
+// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path'
+var systemRegistriesConfPath = builtinRegistriesConfPath
+
+// builtinRegistriesConfPath is the path to the registry configuration file.
+// DO NOT change this, instead see systemRegistriesConfPath above.
+const builtinRegistriesConfPath = "/etc/containers/registries.conf"
+
+// Endpoint describes a remote location of a registry.
+type Endpoint struct {
+	// The endpoint's remote location.
+	Location string `toml:"location,omitempty"`
+	// If true, certs verification will be skipped and HTTP (non-TLS)
+	// connections will be allowed.
+	Insecure bool `toml:"insecure,omitempty"`
+}
+
+// rewriteReference will replace the provided `prefix` with the endpoint's
+// `location` in `ref`, and creates a new named reference from the result.
+// The function errors if the newly created reference is not parsable.
+func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
+	refString := ref.String()
+	if !refMatchesPrefix(refString, prefix) {
+		return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
+	}
+
+	newNamedRef := strings.Replace(refString, prefix, e.Location, 1)
+	newParsedRef, err := reference.ParseNamed(newNamedRef)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error rewriting reference")
+	}
+	logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String())
+	return newParsedRef, nil
+}
+
+// Registry represents a registry.
+type Registry struct {
+	// Prefix is used for matching images, and to translate one namespace to
+	// another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"`
+	// and we pull from "example.com/bar/myimage:latest", the image will
+	// effectively be pulled from "example.com/foo/bar/myimage:latest".
+	// If no Prefix is specified, it defaults to the specified location.
+	Prefix string `toml:"prefix"`
+	// A registry is an Endpoint too
+	Endpoint
+	// The registry's mirrors.
+	Mirrors []Endpoint `toml:"mirror,omitempty"`
+	// If true, pulling from the registry will be blocked.
+	Blocked bool `toml:"blocked,omitempty"`
+	// If true, mirrors will only be used for digest pulls. Pulling images by
+	// tag can potentially yield different images, depending on which endpoint
+	// we pull from. Forcing digest-pulls for mirrors avoids that issue.
+	MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"`
+}
+
+// PullSource consists of an Endpoint and a Reference. Note that the reference is
+// rewritten according to the registry's prefix and the Endpoint's location.
+type PullSource struct {
+	Endpoint  Endpoint
+	Reference reference.Named
+}
+
+// PullSourcesFromReference returns a slice of PullSources based on the passed
+// reference.
+func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) {
+	var endpoints []Endpoint
+
+	if r.MirrorByDigestOnly {
+		// Only use mirrors when the reference is a digest one.
+		if _, isDigested := ref.(reference.Canonical); isDigested {
+			endpoints = append(r.Mirrors, r.Endpoint)
+		} else {
+			endpoints = []Endpoint{r.Endpoint}
+		}
+	} else {
+		endpoints = append(r.Mirrors, r.Endpoint)
+	}
+
+	sources := []PullSource{}
+	for _, ep := range endpoints {
+		rewritten, err := ep.rewriteReference(ref, r.Prefix)
+		if err != nil {
+			return nil, err
+		}
+		sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten})
+	}
+
+	return sources, nil
+}
+
+// V1TOMLregistries is for backwards compatibility to sysregistries v1
+type V1TOMLregistries struct {
+	Registries []string `toml:"registries"`
+}
+
+// V1TOMLConfig is for backwards compatibility to sysregistries v1
+type V1TOMLConfig struct {
+	Search   V1TOMLregistries `toml:"search"`
+	Insecure V1TOMLregistries `toml:"insecure"`
+	Block    V1TOMLregistries `toml:"block"`
+}
+
+// V1RegistriesConf is the sysregistries v1 configuration format.
+type V1RegistriesConf struct {
+	V1TOMLConfig `toml:"registries"`
+}
+
+// Nonempty returns true if config contains at least one configuration entry.
+func (config *V1RegistriesConf) Nonempty() bool {
+	return (len(config.V1TOMLConfig.Search.Registries) != 0 ||
+		len(config.V1TOMLConfig.Insecure.Registries) != 0 ||
+		len(config.V1TOMLConfig.Block.Registries) != 0)
+}
+
+// V2RegistriesConf is the sysregistries v2 configuration format.
+type V2RegistriesConf struct {
+	Registries []Registry `toml:"registry"`
+	// An array of host[:port] (not prefix!) entries to use for resolving unqualified image references
+	UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"`
+}
+
+// Nonempty returns true if config contains at least one configuration entry.
+func (config *V2RegistriesConf) Nonempty() bool {
+	return (len(config.Registries) != 0 ||
+		len(config.UnqualifiedSearchRegistries) != 0)
+}
+
+// tomlConfig is the data type used to unmarshal the toml config.
+type tomlConfig struct {
+	V2RegistriesConf
+	V1RegistriesConf // for backwards compatibility with sysregistries v1
+}
+
+// InvalidRegistries represents invalid registry configurations. An example
+// is when "registry.com" is defined multiple times in the configuration but
+// with conflicting security settings.
+type InvalidRegistries struct {
+	s string
+}
+
+// Error returns the error string.
+func (e *InvalidRegistries) Error() string {
+	return e.s
+}
+
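A sketch of how the prefix/mirror rewriting above plays out from a caller's perspective, using a hypothetical Registry value and reference (PullSourcesFromReference and rewriteReference are both defined above; the hostnames are made up):

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/pkg/sysregistriesv2"
)

func main() {
	reg := sysregistriesv2.Registry{
		Prefix:   "example.com/bar", // matches the Prefix example in the Registry docs above
		Endpoint: sysregistriesv2.Endpoint{Location: "example.com/foo/bar"},
		Mirrors:  []sysregistriesv2.Endpoint{{Location: "mirror.local/bar"}}, // hypothetical mirror
	}
	ref, err := reference.ParseNamed("example.com/bar/myimage:latest")
	if err != nil {
		panic(err)
	}
	sources, err := reg.PullSourcesFromReference(ref)
	if err != nil {
		panic(err)
	}
	for _, s := range sources {
		fmt.Printf("%s -> %s\n", s.Endpoint.Location, s.Reference.String())
	}
	// Mirrors come before the registry's own endpoint:
	// mirror.local/bar -> mirror.local/bar/myimage:latest
	// example.com/foo/bar -> example.com/foo/bar/myimage:latest
}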
+// parseLocation parses the input string, performs some sanity checks and returns
+// the sanitized input string. An error is returned if the input string is
+// empty or if it contains an "http{s,}://" prefix.
+func parseLocation(input string) (string, error) {
+	trimmed := strings.TrimRight(input, "/")
+
+	if trimmed == "" {
+		return "", &InvalidRegistries{s: "invalid location: cannot be empty"}
+	}
+
+	if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
+		msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input)
+		return "", &InvalidRegistries{s: msg}
+	}
+
+	return trimmed, nil
+}
+
+// ConvertToV2 returns a v2 config corresponding to a v1 one.
+func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) {
+	regMap := make(map[string]*Registry)
+	// The order of the registries is not really important, but make it deterministic (the same for the same config file)
+	// to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations.
+	registryOrder := []string{}
+
+	getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object
+		var err error
+		location, err = parseLocation(location)
+		if err != nil {
+			return nil, err
+		}
+		reg, exists := regMap[location]
+		if !exists {
+			reg = &Registry{
+				Endpoint: Endpoint{Location: location},
+				Mirrors:  []Endpoint{},
+				Prefix:   location,
+			}
+			regMap[location] = reg
+			registryOrder = append(registryOrder, location)
+		}
+		return reg, nil
+	}
+
+	for _, blocked := range config.V1TOMLConfig.Block.Registries {
+		reg, err := getRegistry(blocked)
+		if err != nil {
+			return nil, err
+		}
+		reg.Blocked = true
+	}
+	for _, insecure := range config.V1TOMLConfig.Insecure.Registries {
+		reg, err := getRegistry(insecure)
+		if err != nil {
+			return nil, err
+		}
+		reg.Insecure = true
+	}
+
+	res := &V2RegistriesConf{
+		UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries,
+	}
+	for _, location := range registryOrder {
+		reg := regMap[location]
+		res.Registries = append(res.Registries, *reg)
+	}
+	return res, nil
+}
+
+// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries.
+var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$")
+
+// postProcess checks the consistency of all the configuration, looks for conflicts,
+// and normalizes the configuration (e.g., sets the Prefix to Location if not set).
+func (config *V2RegistriesConf) postProcess() error {
+	regMap := make(map[string][]*Registry)
+
+	for i := range config.Registries {
+		reg := &config.Registries[i]
+		// make sure Location and Prefix are valid
+		var err error
+		reg.Location, err = parseLocation(reg.Location)
+		if err != nil {
+			return err
+		}
+
+		if reg.Prefix == "" {
+			reg.Prefix = reg.Location
+		} else {
+			reg.Prefix, err = parseLocation(reg.Prefix)
+			if err != nil {
+				return err
+			}
+		}
+
+		// make sure mirrors are valid (index into the slice so the normalized location is actually stored back)
+		for j := range reg.Mirrors {
+			reg.Mirrors[j].Location, err = parseLocation(reg.Mirrors[j].Location)
+			if err != nil {
+				return err
+			}
+		}
+		regMap[reg.Location] = append(regMap[reg.Location], reg)
+	}
+
+	// Given a registry can be mentioned multiple times (e.g., to have
+	// multiple prefixes backed by different mirrors), we need to make sure
+	// there are no conflicts among them.
+	//
+	// Note: we need to iterate over the registries array to ensure a
+	// deterministic behavior which is not guaranteed by maps.
+ for _, reg := range config.Registries {
+ others := regMap[reg.Location]
+ for _, other := range others {
+ if reg.Insecure != other.Insecure {
+ msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
+ return &InvalidRegistries{s: msg}
+ }
+ if reg.Blocked != other.Blocked {
+ msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
+ return &InvalidRegistries{s: msg}
+ }
+ }
+ }
+
+ for i := range config.UnqualifiedSearchRegistries {
+ registry, err := parseLocation(config.UnqualifiedSearchRegistries[i])
+ if err != nil {
+ return err
+ }
+ if !anchoredDomainRegexp.MatchString(registry) {
+ return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)}
+ }
+ config.UnqualifiedSearchRegistries[i] = registry
+ }
+
+ return nil
+}
+
+// ConfigPath returns the path to the system-wide registry configuration file.
+func ConfigPath(ctx *types.SystemContext) string {
+ confPath := systemRegistriesConfPath
+ if ctx != nil {
+ if ctx.SystemRegistriesConfPath != "" {
+ confPath = ctx.SystemRegistriesConfPath
+ } else if ctx.RootForImplicitAbsolutePaths != "" {
+ confPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
+ }
+ }
+ return confPath
+}
+
+// configMutex is used to synchronize concurrent accesses to configCache.
+var configMutex = sync.Mutex{}
+
+// configCache caches already loaded configs with config paths as keys and is
+// used to avoid redundantly parsing configs. Concurrent accesses to the cache
+// are synchronized via configMutex.
+var configCache = make(map[string]*V2RegistriesConf)
+
+// InvalidateCache invalidates the registry cache. This function is meant to be
+// used for long-running processes that need to reload potential changes made to
+// the cached registry config files.
+func InvalidateCache() {
+ configMutex.Lock()
+ defer configMutex.Unlock()
+ configCache = make(map[string]*V2RegistriesConf)
+}
+
+// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached.
+func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) {
+ configPath := ConfigPath(ctx)
+
+ configMutex.Lock()
+ // if the config has already been loaded, return the cached registries
+ if config, inCache := configCache[configPath]; inCache {
+ configMutex.Unlock()
+ return config, nil
+ }
+ configMutex.Unlock()
+
+ return TryUpdatingCache(ctx)
+}
+
+// TryUpdatingCache loads the configuration from the provided `SystemContext`
+// without using the internal cache. On success, the loaded configuration will
+// be added into the internal registry cache.
+func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) {
+ configPath := ConfigPath(ctx)
+
+ configMutex.Lock()
+ defer configMutex.Unlock()
+
+ // load the config
+ config, err := loadRegistryConf(configPath)
+ if err != nil {
+ // Return an empty []Registry if we use the default config,
+ // which implies that the config path of the SystemContext
+ // isn't set. Note: if ctx.SystemRegistriesConfPath points to
+ // the default config, we will still return an error.
+ if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
+ return &V2RegistriesConf{Registries: []Registry{}}, nil
+ }
+ return nil, err
+ }
+
+ v2Config := &config.V2RegistriesConf
+
+ // backwards compatibility for v1 configs
+ if config.V1RegistriesConf.Nonempty() {
+ if config.V2RegistriesConf.Nonempty() {
+ return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
+ }
+ v2, err := config.V1RegistriesConf.ConvertToV2()
+ if err != nil {
+ return nil, err
+ }
+ v2Config = v2
+ }
+
+ if err := v2Config.postProcess(); err != nil {
+ return nil, err
+ }
+
+ // populate the cache
+ configCache[configPath] = v2Config
+ return v2Config, nil
+}
+
+// GetRegistries loads and returns the registries specified in the config.
+// Note the parsed content of registry config files is cached. For reloading,
+// use `InvalidateCache` and re-call `GetRegistries`.
+func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
+ config, err := getConfig(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return config.Registries, nil
+}
+
+// UnqualifiedSearchRegistries returns a list of host[:port] entries to try
+// for unqualified image search, in the returned order.
+func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) {
+ config, err := getConfig(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return config.UnqualifiedSearchRegistries, nil
+}
+
+// refMatchesPrefix returns true iff ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!),
+// matches a Registry.Prefix value.
+// (This is split from the caller primarily to make testing easier.)
+func refMatchesPrefix(ref, prefix string) bool {
+ switch {
+ case len(ref) < len(prefix):
+ return false
+ case len(ref) == len(prefix):
+ return ref == prefix
+ case len(ref) > len(prefix):
+ if !strings.HasPrefix(ref, prefix) {
+ return false
+ }
+ c := ref[len(prefix)]
+ // This allows "example.com:5000" to match "example.com",
+ // which is unintended; that will get fixed eventually, DON'T RELY
+ // ON THE CURRENT BEHAVIOR.
+ return c == ':' || c == '/' || c == '@'
+ default:
+ panic("Internal error: impossible comparison outcome")
+ }
+}
+
+// FindRegistry returns the Registry with the longest prefix for ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!).
+// If no Registry prefixes the image, nil is returned.
+func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
+ config, err := getConfig(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ reg := Registry{}
+ prefixLen := 0
+ for _, r := range config.Registries {
+ if refMatchesPrefix(ref, r.Prefix) {
+ length := len(r.Prefix)
+ if length > prefixLen {
+ reg = r
+ prefixLen = length
+ }
+ }
+ }
+ if prefixLen != 0 {
+ return &reg, nil
+ }
+ return nil, nil
+}
+
+// loadRegistryConf loads the registry configuration file from the filesystem
+// and then unmarshals it. It returns the unmarshalled object.
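+//
+// For orientation, a minimal v2 configuration in this format might look like
+// the following (a sketch; the locations, and the exact "prefix"/"mirror" TOML
+// keys, are assumptions, not confirmed by this file alone):
+//
+//    unqualified-search-registries = ["registry.example.com"]
+//
+//    [[registry]]
+//    prefix = "registry.example.com"
+//    location = "registry.example.com"
+//
+//    [[registry.mirror]]
+//    location = "mirror.example.com"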
+func loadRegistryConf(configPath string) (*tomlConfig, error) { + config := &tomlConfig{} + + configBytes, err := ioutil.ReadFile(configPath) + if err != nil { + return nil, err + } + + err = toml.Unmarshal(configBytes, &config) + return config, err +} diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go new file mode 100644 index 000000000..6785564e8 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -0,0 +1,112 @@ +package tlsclientconfig + +import ( + "crypto/tls" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc +func SetupCertificates(dir string, tlsc *tls.Config) error { + logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) + fs, err := ioutil.ReadDir(dir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + if os.IsPermission(err) { + logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) + return nil + } + return err + } + + for _, f := range fs { + fullPath := filepath.Join(dir, f.Name()) + if strings.HasSuffix(f.Name(), ".crt") { + logrus.Debugf(" crt: %s", fullPath) + data, err := ioutil.ReadFile(fullPath) + if err != nil { + if os.IsNotExist(err) { + // Dangling symbolic link? + // Race with someone who deleted the + // file after we read the directory's + // list of contents? + logrus.Warnf("error reading certificate %q: %v", fullPath, err) + continue + } + return err + } + if tlsc.RootCAs == nil { + systemPool, err := tlsconfig.SystemCertPool() + if err != nil { + return errors.Wrap(err, "unable to get system cert pool") + } + tlsc.RootCAs = systemPool + } + tlsc.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf(" cert: %s", fullPath) + if !hasFile(fs, keyName) { + return errors.Errorf("missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt", keyName, certName)
+ }
+ cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
+ if err != nil {
+ return err
+ }
+ tlsc.Certificates = append(tlsc.Certificates, cert)
+ }
+ if strings.HasSuffix(f.Name(), ".key") {
+ keyName := f.Name()
+ certName := keyName[:len(keyName)-4] + ".cert"
+ logrus.Debugf(" key: %s", fullPath)
+ if !hasFile(fs, certName) {
+ return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
+ }
+ }
+ }
+ return nil
+}
+
+func hasFile(files []os.FileInfo, name string) bool {
+ for _, f := range files {
+ if f.Name() == name {
+ return true
+ }
+ }
+ return false
+}
+
+// NewTransport creates a default transport
+func NewTransport() *http.Transport {
+ direct := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+ tr := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: direct.Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ // TODO(dmcgowan): Call close idle connections when complete and use keep alive
+ DisableKeepAlives: true,
+ }
+ proxyDialer, err := sockets.DialerFromEnvironment(direct)
+ if err == nil {
+ tr.Dial = proxyDialer.Dial
+ }
+ return tr
+}
diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go
new file mode 100644
index 000000000..07fdd42a9
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/docker.go
@@ -0,0 +1,65 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/opencontainers/go-digest"
+)
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return nil, err
+ }
+ sig := newUntrustedSignature(manifestDigest, dockerReference)
+ return sig.sign(mech, keyIdentity)
+}
+
+// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
+// using mech.
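+//
+// A typical call shape (sketch; the reference and fingerprint values are
+// assumptions):
+//
+//    mech, err := NewGPGSigningMechanism()
+//    if err != nil { ... }
+//    defer mech.Close()
+//    sig, err := VerifyDockerManifestSignature(sigBlob, manifestBlob,
+//        "docker.io/library/busybox:latest", mech, "0123456789ABCDEF0123456789ABCDEF01234567")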
+func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte, + expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) { + expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference) + if err != nil { + return nil, err + } + sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{ + validateKeyIdentity: func(keyIdentity string) error { + if keyIdentity != expectedKeyIdentity { + return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)} + } + return nil + }, + validateSignedDockerReference: func(signedDockerReference string) error { + signedRef, err := reference.ParseNormalizedNamed(signedDockerReference) + if err != nil { + return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)} + } + if signedRef.String() != expectedRef.String() { + return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s", + signedDockerReference, expectedDockerReference)} + } + return nil + }, + validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error { + matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest) + if err != nil { + return err + } + if !matches { + return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)} + } + return nil + }, + }) + if err != nil { + return nil, err + } + return sig, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/json.go b/vendor/github.com/containers/image/v5/signature/json.go new file mode 100644 index 000000000..9e592863d --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/json.go @@ -0,0 +1,88 @@ +package signature + +import ( + "bytes" + "encoding/json" + "fmt" + "io" +) + +// jsonFormatError is returned when JSON does not match expected format. +type jsonFormatError string + +func (err jsonFormatError) Error() string { + return string(err) +} + +// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect +// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to +// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected. +// +// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy, +// we could use reflection to automate this. Later? +func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error { + seenKeys := map[string]struct{}{} + + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return jsonFormatError(err.Error()) + } + if t != json.Delim('{') { + return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t)) + } + for { + t, err := dec.Token() + if err != nil { + return jsonFormatError(err.Error()) + } + if t == json.Delim('}') { + break + } + + key, ok := t.(string) + if !ok { + // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state. 
+ return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t)) + } + if _, ok := seenKeys[key]; ok { + return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key)) + } + seenKeys[key] = struct{}{} + + valuePtr := fieldResolver(key) + if valuePtr == nil { + return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key)) + } + // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value. + if err := dec.Decode(valuePtr); err != nil { + return jsonFormatError(err.Error()) + } + } + if _, err := dec.Token(); err != io.EOF { + return jsonFormatError("Unexpected data after JSON object") + } + return nil +} + +// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect +// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields +// must be present exactly once, and none other fields are accepted. +func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error { + seenKeys := map[string]struct{}{} + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + if valuePtr, ok := exactFields[key]; ok { + seenKeys[key] = struct{}{} + return valuePtr + } + return nil + }); err != nil { + return err + } + for key := range exactFields { + if _, ok := seenKeys[key]; !ok { + return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key)) + } + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go new file mode 100644 index 000000000..bdf26c531 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism.go @@ -0,0 +1,85 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "strings" + + "golang.org/x/crypto/openpgp" +) + +// SigningMechanism abstracts a way to sign binary blobs and verify their signatures. +// Each mechanism should eventually be closed by calling Close(). +// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to +// eliminate ambiguities, support CA signatures and perhaps other key properties) +type SigningMechanism interface { + // Close removes resources associated with the mechanism, if any. + Close() error + // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. + SupportsSigning() error + // Sign creates a (non-detached) signature of input using keyIdentity. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + Sign(input []byte, keyIdentity string) ([]byte, error) + // Verify parses unverifiedSignature and returns the content and the signer's identity + Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) + // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, + // along with a short identifier of the key used for signing. + // WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys) + // is NOT the same as a "key identity" used in other calls ot this interface, and + // the values may have no recognizable relationship if the public key is not available. 
+ UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
+}
+
+// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
+type SigningNotSupportedError string
+
+func (err SigningNotSupportedError) Error() string {
+ return string(err)
+}
+
+// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default
+// GPG configuration ($GNUPGHOME / ~/.gnupg).
+// The caller must call .Close() on the returned SigningMechanism.
+func NewGPGSigningMechanism() (SigningMechanism, error) {
+ return newGPGSigningMechanismInDirectory("")
+}
+
+// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blob, and returns the identities
+// of these keys.
+// The caller must call .Close() on the returned SigningMechanism.
+func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+ return newEphemeralGPGSigningMechanism(blob)
+}
+
+// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
+ md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ if !md.IsSigned {
+ return nil, "", errors.New("The input is not a signature")
+ }
+ content, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ // Coverage: An error during reading the body can happen only if
+ // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
+ // to decrypt the contents anyway), or
+ // 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t.
+ return nil, "", err
+ }
+
+ // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints
+ // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)!
+ return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
new file mode 100644
index 000000000..4825ab27c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
@@ -0,0 +1,175 @@
+// +build !containers_image_openpgp
+
+package signature
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/mtrmac/gpgme"
+)
+
+// A GPG/OpenPGP signing mechanism, implemented using gpgme.
+type gpgmeSigningMechanism struct {
+ ctx *gpgme.Context
+ ephemeralDir string // If not "", a directory to be removed on Close()
+}
+
+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
+// The caller must call .Close() on the returned SigningMechanism.
+func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { + ctx, err := newGPGMEContext(optionalDir) + if err != nil { + return nil, err + } + return &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: "", + }, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") + if err != nil { + return nil, nil, err + } + removeDir := true + defer func() { + if removeDir { + os.RemoveAll(dir) + } + }() + ctx, err := newGPGMEContext(dir) + if err != nil { + return nil, nil, err + } + mech := &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: dir, + } + keyIdentities, err := mech.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + + removeDir = false + return mech, keyIdentities, nil +} + +// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty. +func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { + ctx, err := gpgme.New() + if err != nil { + return nil, err + } + if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { + return nil, err + } + if optionalDir != "" { + err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) + if err != nil { + return nil, err + } + } + ctx.SetArmor(false) + ctx.SetTextMode(false) + return ctx, nil +} + +func (m *gpgmeSigningMechanism) Close() error { + if m.ephemeralDir != "" { + os.RemoveAll(m.ephemeralDir) // Ignore an error, if any + } + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); +// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. +func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + inputData, err := gpgme.NewDataBytes(blob) + if err != nil { + return nil, err + } + res, err := m.ctx.Import(inputData) + if err != nil { + return nil, err + } + keyIdentities := []string{} + for _, i := range res.Imports { + if i.Result == nil { + keyIdentities = append(keyIdentities, i.Fingerprint) + } + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *gpgmeSigningMechanism) SupportsSigning() error { + return nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
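+//
+// Call shape (sketch; the key fingerprint is an assumption):
+//
+//    sig, err := mech.Sign(payload, "0123456789ABCDEF0123456789ABCDEF01234567")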
+func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ key, err := m.ctx.GetKey(keyIdentity, true)
+ if err != nil {
+ return nil, err
+ }
+ inputData, err := gpgme.NewDataBytes(input)
+ if err != nil {
+ return nil, err
+ }
+ var sigBuffer bytes.Buffer
+ sigData, err := gpgme.NewDataWriter(&sigBuffer)
+ if err != nil {
+ return nil, err
+ }
+ if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
+ return nil, err
+ }
+ return sigBuffer.Bytes(), nil
+}
+
+// Verify parses unverifiedSignature and returns the content and the signer's identity
+func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+ signedBuffer := bytes.Buffer{}
+ signedData, err := gpgme.NewDataWriter(&signedBuffer)
+ if err != nil {
+ return nil, "", err
+ }
+ unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
+ if err != nil {
+ return nil, "", err
+ }
+ _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
+ if err != nil {
+ return nil, "", err
+ }
+ if len(sigs) != 1 {
+ return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
+ }
+ sig := sigs[0]
+ // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
+ if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
+ // FIXME: Better error reporting eventually
+ return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
+ }
+ return signedBuffer.Bytes(), sig.Fingerprint, nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
new file mode 100644
index 000000000..eccd610c9
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
@@ -0,0 +1,159 @@
+// +build containers_image_openpgp
+
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/containers/storage/pkg/homedir"
+ "golang.org/x/crypto/openpgp"
+)
+
+// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.
+type openpgpSigningMechanism struct {
+ keyring openpgp.EntityList
+}
+
+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
+// The caller must call .Close() on the returned SigningMechanism.
+func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + + gpgHome := optionalDir + if gpgHome == "" { + gpgHome = os.Getenv("GNUPGHOME") + if gpgHome == "" { + gpgHome = path.Join(homedir.Get(), ".gnupg") + } + } + + pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + } else { + _, err := m.importKeysFromBytes(pubring) + if err != nil { + return nil, err + } + } + return m, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + keyIdentities, err := m.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + return m, keyIdentities, nil +} + +func (m *openpgpSigningMechanism) Close() error { + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob)) + if err != nil { + k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob)) + if e2 != nil { + return nil, err // The original error -- FIXME: is this better? + } + keyring = k + } + + keyIdentities := []string{} + for _, entity := range keyring { + if entity.PrimaryKey == nil { + // Coverage: This should never happen, openpgp.ReadEntity fails with a + // openpgp.errors.StructuralError instead of returning an entity with this + // field set to nil. + continue + } + // Uppercase the fingerprint to be compatible with gpgme + keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))) + m.keyring = append(m.keyring, entity) + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *openpgpSigningMechanism) SupportsSigning() error { + return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// Verify parses unverifiedSignature and returns the content and the signer's identity
+func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+ md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ if !md.IsSigned {
+ return nil, "", errors.New("not signed")
+ }
+ content, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
+ // (and possibly also signed, but it _must_ be encrypted) and the signing
+ // “modification detection code” detects a mismatch. But in that case,
+ // we would expect the signature verification to fail as well, and that is checked
+ // first. Besides, we are not supplying any decryption keys, so we really
+ // can never reach this “encrypted data MDC mismatch” path.
+ return nil, "", err
+ }
+ if md.SignatureError != nil {
+ return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
+ }
+ if md.SignedBy == nil {
+ return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
+ }
+ if md.Signature != nil {
+ if md.Signature.SigLifetimeSecs != nil {
+ expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
+ if time.Now().After(expiry) {
+ return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
+ }
+ }
+ } else if md.SignatureV3 == nil {
+ // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
+ // or sets md.SignatureError.
+ return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"}
+ }
+
+ // Uppercase the fingerprint to be compatible with gpgme
+ return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go
new file mode 100644
index 000000000..3eee70bc2
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_config.go
@@ -0,0 +1,688 @@
+// policy_config.go handles creation of policy objects, either by parsing JSON
+// or by programs building them programmatically.
+
+// The New* constructors are intended to be a stable API. FIXME: after an independent review.
+
+// Do not invoke the internals of the JSON marshaling/unmarshaling directly.
+
+// We can't just blindly call json.Unmarshal because that would silently ignore
+// typos, and that would just not do for security policy.
+
+// FIXME? This is by no means a user-friendly parser: No location information in error messages, no other context.
+// But at least it is not worse than blind json.Unmarshal()…
+
+package signature
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+)
+
+// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
+var systemDefaultPolicyPath = builtinDefaultPolicyPath
+
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath above.
+const builtinDefaultPolicyPath = "/etc/containers/policy.json"
+
+// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
+type InvalidPolicyFormatError string
+
+func (err InvalidPolicyFormatError) Error() string {
+ return string(err)
+}
+
+// DefaultPolicy returns the default policy of the system.
+// Most applications should be using this method to get the policy configured
+// by the system administrator.
+// sys should usually be nil, can be set to override the default.
+// NOTE: When this function returns an error, report it to the user and abort.
+// DO NOT hard-code fallback policies in your application.
+func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
+ return NewPolicyFromFile(defaultPolicyPath(sys))
+}
+
+// defaultPolicyPath returns a path to the default policy of the system.
+func defaultPolicyPath(sys *types.SystemContext) string {
+ if sys != nil {
+ if sys.SignaturePolicyPath != "" {
+ return sys.SignaturePolicyPath
+ }
+ if sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
+ }
+ }
+ return systemDefaultPolicyPath
+}
+
+// NewPolicyFromFile returns a policy configured in the specified file.
+func NewPolicyFromFile(fileName string) (*Policy, error) {
+ contents, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return nil, err
+ }
+ policy, err := NewPolicyFromBytes(contents)
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid policy in %q", fileName)
+ }
+ return policy, nil
+}
+
+// NewPolicyFromBytes returns a policy parsed from the specified blob.
+// Use this function instead of calling json.Unmarshal directly.
+func NewPolicyFromBytes(data []byte) (*Policy, error) {
+ p := Policy{}
+ if err := json.Unmarshal(data, &p); err != nil {
+ return nil, InvalidPolicyFormatError(err.Error())
+ }
+ return &p, nil
+}
+
+// Compile-time check that Policy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*Policy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
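+//
+// For orientation, the smallest policy this parser accepts looks like
+// (a sketch, not a recommendation):
+//
+//    {"default": [{"type": "insecureAcceptAnything"}]}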
+func (p *Policy) UnmarshalJSON(data []byte) error {
+ *p = Policy{}
+ transports := policyTransportsMap{}
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ switch key {
+ case "default":
+ return &p.Default
+ case "transports":
+ return &transports
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if p.Default == nil {
+ return InvalidPolicyFormatError("Default policy is missing")
+ }
+ p.Transports = map[string]PolicyTransportScopes(transports)
+ return nil
+}
+
+// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member.
+type policyTransportsMap map[string]PolicyTransportScopes
+
+// Compile-time check that policyTransportsMap implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportsMap)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
+ // We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+ // So, use a temporary map of pointers-to-slices and convert.
+ tmpMap := map[string]*PolicyTransportScopes{}
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ // transport can be nil
+ transport := transports.Get(key)
+ // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if _, ok := tmpMap[key]; ok {
+ return nil
+ }
+ ptsWithTransport := policyTransportScopesWithTransport{
+ transport: transport,
+ dest: &PolicyTransportScopes{}, // This allocates a new instance on each call.
+ }
+ tmpMap[key] = ptsWithTransport.dest
+ return &ptsWithTransport
+ }); err != nil {
+ return err
+ }
+ for key, ptr := range tmpMap {
+ (*m)[key] = *ptr
+ }
+ return nil
+}
+
+// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler.
+// We want to only use policyTransportScopesWithTransport.
+var _ json.Unmarshaler = (*PolicyTransportScopes)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error {
+ return errors.New("Do not try to unmarshal PolicyTransportScopes directly")
+}
+
+// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes
+// while validating using a specific ImageTransport if not nil.
+type policyTransportScopesWithTransport struct {
+ transport types.ImageTransport
+ dest *PolicyTransportScopes
+}
+
+// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
+ // We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+ // So, use a temporary map of pointers-to-slices and convert.
+ tmpMap := map[string]*PolicyRequirements{}
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if _, ok := tmpMap[key]; ok {
+ return nil
+ }
+ if key != "" && m.transport != nil {
+ if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil {
+ return nil
+ }
+ }
+ ptr := &PolicyRequirements{} // This allocates a new instance on each call.
+ tmpMap[key] = ptr + return ptr + }); err != nil { + return err + } + for key, ptr := range tmpMap { + (*m.dest)[key] = *ptr + } + return nil +} + +// Compile-time check that PolicyRequirements implements json.Unmarshaler. +var _ json.Unmarshaler = (*PolicyRequirements)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { + reqJSONs := []json.RawMessage{} + if err := json.Unmarshal(data, &reqJSONs); err != nil { + return err + } + if len(reqJSONs) == 0 { + return InvalidPolicyFormatError("List of verification policy requirements must not be empty") + } + res := make([]PolicyRequirement, len(reqJSONs)) + for i, reqJSON := range reqJSONs { + req, err := newPolicyRequirementFromJSON(reqJSON) + if err != nil { + return err + } + res[i] = req + } + *m = res + return nil +} + +// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. +func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { + var typeField prCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyRequirement + switch typeField.Type { + case prTypeInsecureAcceptAnything: + res = &prInsecureAcceptAnything{} + case prTypeReject: + res = &prReject{} + case prTypeSignedBy: + res = &prSignedBy{} + case prTypeSignedBaseLayer: + res = &prSignedBaseLayer{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. +func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { + return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} +} + +// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. +func NewPRInsecureAcceptAnything() PolicyRequirement { + return newPRInsecureAcceptAnything() +} + +// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. +var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { + *pr = prInsecureAcceptAnything{} + var tmp prInsecureAcceptAnything + if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeInsecureAcceptAnything { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *pr = *newPRInsecureAcceptAnything() + return nil +} + +// newPRReject is NewPRReject, except it returns the private type. +func newPRReject() *prReject { + return &prReject{prCommon{Type: prTypeReject}} +} + +// NewPRReject returns a new "reject" PolicyRequirement. +func NewPRReject() PolicyRequirement { + return newPRReject() +} + +// Compile-time check that prReject implements json.Unmarshaler. +var _ json.Unmarshaler = (*prReject)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (pr *prReject) UnmarshalJSON(data []byte) error {
+ *pr = prReject{}
+ var tmp prReject
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeReject {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *pr = *newPRReject()
+ return nil
+}
+
+// newPRSignedBy returns a new prSignedBy if parameters are valid.
+func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ if !keyType.IsValid() {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
+ }
+ if len(keyPath) > 0 && len(keyData) > 0 {
+ return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+ }
+ if signedIdentity == nil {
+ return nil, InvalidPolicyFormatError("signedIdentity not specified")
+ }
+ return &prSignedBy{
+ prCommon: prCommon{Type: prTypeSignedBy},
+ KeyType: keyType,
+ KeyPath: keyPath,
+ KeyData: keyData,
+ SignedIdentity: signedIdentity,
+ }, nil
+}
+
+// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
+func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
+}
+
+// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
+func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
+}
+
+// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
+func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ return newPRSignedBy(keyType, "", keyData, signedIdentity)
+}
+
+// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData
+func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedByKeyData(keyType, keyData, signedIdentity)
+}
+
+// Compile-time check that prSignedBy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
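+//
+// A representative "signedBy" requirement (sketch; the key path and the exact
+// "GPGKeys" keyType string are assumptions; signedIdentity defaults to
+// "matchRepoDigestOrExact" when omitted):
+//
+//    {"type": "signedBy", "keyType": "GPGKeys", "keyPath": "/etc/pki/containers/key.gpg"}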
+func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
+ *pr = prSignedBy{}
+ var tmp prSignedBy
+ var gotKeyPath, gotKeyData = false, false
+ var signedIdentity json.RawMessage
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ switch key {
+ case "type":
+ return &tmp.Type
+ case "keyType":
+ return &tmp.KeyType
+ case "keyPath":
+ gotKeyPath = true
+ return &tmp.KeyPath
+ case "keyData":
+ gotKeyData = true
+ return &tmp.KeyData
+ case "signedIdentity":
+ return &signedIdentity
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeSignedBy {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ if signedIdentity == nil {
+ tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
+ } else {
+ si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
+ if err != nil {
+ return err
+ }
+ tmp.SignedIdentity = si
+ }
+
+ var res *prSignedBy
+ var err error
+ switch {
+ case gotKeyPath && gotKeyData:
+ return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+ case gotKeyPath && !gotKeyData:
+ res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
+ case !gotKeyPath && gotKeyData:
+ res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
+ case !gotKeyPath && !gotKeyData:
+ return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
+ default: // Coverage: This should never happen
+ return errors.Errorf("Impossible keyPath/keyData presence combination!?")
+ }
+ if err != nil {
+ return err
+ }
+ *pr = *res
+
+ return nil
+}
+
+// IsValid returns true iff kt is a recognized value
+func (kt sbKeyType) IsValid() bool {
+ switch kt {
+ case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,
+ SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
+ return true
+ default:
+ return false
+ }
+}
+
+// Compile-time check that sbKeyType implements json.Unmarshaler.
+var _ json.Unmarshaler = (*sbKeyType)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
+ *kt = sbKeyType("")
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ if !sbKeyType(s).IsValid() {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
+ }
+ *kt = sbKeyType(s)
+ return nil
+}
+
+// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
+func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {
+ if baseLayerIdentity == nil {
+ return nil, InvalidPolicyFormatError("baseLayerIdentity not specified")
+ }
+ return &prSignedBaseLayer{
+ prCommon: prCommon{Type: prTypeSignedBaseLayer},
+ BaseLayerIdentity: baseLayerIdentity,
+ }, nil
+}
+
+// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement.
+func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedBaseLayer(baseLayerIdentity)
+}
+
+// Compile-time check that prSignedBaseLayer implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBaseLayer)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
+ *pr = prSignedBaseLayer{}
+ var tmp prSignedBaseLayer
+ var baseLayerIdentity json.RawMessage
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ "baseLayerIdentity": &baseLayerIdentity,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeSignedBaseLayer {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
+ if err != nil {
+ return err
+ }
+ res, err := newPRSignedBaseLayer(bli)
+ if err != nil {
+ // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid.
+ return err
+ }
+ *pr = *res
+ return nil
+}
+
+// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
+func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
+ var typeField prmCommon
+ if err := json.Unmarshal(data, &typeField); err != nil {
+ return nil, err
+ }
+ var res PolicyReferenceMatch
+ switch typeField.Type {
+ case prmTypeMatchExact:
+ res = &prmMatchExact{}
+ case prmTypeMatchRepoDigestOrExact:
+ res = &prmMatchRepoDigestOrExact{}
+ case prmTypeMatchRepository:
+ res = &prmMatchRepository{}
+ case prmTypeExactReference:
+ res = &prmExactReference{}
+ case prmTypeExactRepository:
+ res = &prmExactRepository{}
+ default:
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
+ }
+ if err := json.Unmarshal(data, &res); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
+func newPRMMatchExact() *prmMatchExact {
+ return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
+}
+
+// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch.
+func NewPRMMatchExact() PolicyReferenceMatch {
+ return newPRMMatchExact()
+}
+
+// Compile-time check that prmMatchExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
+ *prm = prmMatchExact{}
+ var tmp prmMatchExact
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeMatchExact {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *prm = *newPRMMatchExact()
+ return nil
+}
+
+// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
+func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
+ return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
+}
+
+// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch.
+func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch {
+ return newPRMMatchRepoDigestOrExact()
+}
+
+// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
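+//
+// On the wire this is simply {"type": "matchRepoDigestOrExact"} (sketch).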
+func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
+ *prm = prmMatchRepoDigestOrExact{}
+ var tmp prmMatchRepoDigestOrExact
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeMatchRepoDigestOrExact {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *prm = *newPRMMatchRepoDigestOrExact()
+ return nil
+}
+
+// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
+func newPRMMatchRepository() *prmMatchRepository {
+ return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
+}
+
+// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch.
+func NewPRMMatchRepository() PolicyReferenceMatch {
+ return newPRMMatchRepository()
+}
+
+// Compile-time check that prmMatchRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
+ *prm = prmMatchRepository{}
+ var tmp prmMatchRepository
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeMatchRepository {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *prm = *newPRMMatchRepository()
+ return nil
+}
+
+// newPRMExactReference is NewPRMExactReference, except it returns the private type.
+func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
+ ref, err := reference.ParseNormalizedNamed(dockerReference)
+ if err != nil {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
+ }
+ if reference.IsNameOnly(ref) {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
+ }
+ return &prmExactReference{
+ prmCommon: prmCommon{Type: prmTypeExactReference},
+ DockerReference: dockerReference,
+ }, nil
+}
+
+// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch.
+func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
+ return newPRMExactReference(dockerReference)
+}
+
+// Compile-time check that prmExactReference implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactReference)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
+ *prm = prmExactReference{}
+ var tmp prmExactReference
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ "dockerReference": &tmp.DockerReference,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeExactReference {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+
+ res, err := newPRMExactReference(tmp.DockerReference)
+ if err != nil {
+ return err
+ }
+ *prm = *res
+ return nil
+}
+
+// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
+func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
+ if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
+ }
+ return &prmExactRepository{
+ prmCommon: prmCommon{Type: prmTypeExactRepository},
+ DockerRepository: dockerRepository,
+ }, nil
+}
+
+// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
+func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
+ return newPRMExactRepository(dockerRepository)
+}
+
+// Compile-time check that prmExactRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
+ *prm = prmExactRepository{}
+ var tmp prmExactRepository
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ "dockerRepository": &tmp.DockerRepository,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeExactRepository {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+
+ res, err := newPRMExactRepository(tmp.DockerRepository)
+ if err != nil {
+ return err
+ }
+ *prm = *res
+ return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go
new file mode 100644
index 000000000..e94de2a9c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go
@@ -0,0 +1,289 @@
+// This defines the top-level policy evaluation API.
+// To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+ return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+ sarAccepted signatureAcceptanceResult = "sarAccepted"
+ sarRejected signatureAcceptanceResult = "sarRejected"
+ sarUnknown signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+ // FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+ // costly initialization like creating temporary GPG home directories and reading files.
+ // Setup() (someState, error)
+ // Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+ // isSignatureAuthorAccepted, given an image and a signature blob, returns:
+ // - sarAccepted if the signature has been verified against the appropriate public key
+ // (where "appropriate public key" may depend on the contents of the signature);
+ // in that case a parsed Signature should be returned.
+	// - sarRejected if the signature has not been verified;
+	//   in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
+	//   succeeded but the result was rejection.
+	// - sarUnknown if this PolicyRequirement does not deal with signatures.
+	//   NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+	//   Returning sarUnknown and a non-nil error value is invalid.
+	// WARNING: This makes the signature contents acceptable for further processing,
+	// but it does not necessarily mean that the contents of the signature are
+	// consistent with local policy.
+	// For example:
+	// - Do not use a true value to determine whether to run
+	//   a container based on this image; use IsRunningImageAllowed instead.
+	// - Just because a signature is accepted does not automatically mean the contents of the
+	//   signature are authorized to run code as root, or to affect system or cluster configuration.
+	isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+	// isRunningImageAllowed returns true if the requirement allows running an image.
+	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+	// succeeded but the result was rejection.
+	// WARNING: This validates signatures and the manifest, but does not download or validate the
+	// layers. Users must validate that the layers match their expected digests.
+	isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error)
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+type PolicyReferenceMatch interface {
+	// matchesDockerReference decides whether a specific image identity is accepted for an image
+	// (or, usually, for the image's Reference().DockerReference()). Note that
+	// image.Reference().DockerReference() may be nil.
+	matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
+}
+
+// PolicyContext encapsulates a policy and possible cached state
+// for speeding up its evaluation.
+type PolicyContext struct {
+	Policy *Policy
+	state  policyContextState // Internal consistency checking
+}
+
+// policyContextState is used internally to verify the users are not misusing a PolicyContext.
+type policyContextState string
+
+const (
+	pcInvalid      policyContextState = ""
+	pcInitializing policyContextState = "Initializing"
+	pcReady        policyContextState = "Ready"
+	pcInUse        policyContextState = "InUse"
+	pcDestroying   policyContextState = "Destroying"
+	pcDestroyed    policyContextState = "Destroyed"
+)
+
+// changeState changes pc.state, or fails if the state is unexpected
+func (pc *PolicyContext) changeState(expected, new policyContextState) error {
+	if pc.state != expected {
+		return errors.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+	}
+	pc.state = new
+	return nil
+}
+
+// NewPolicyContext sets up and initializes a context for the specified policy.
+// The policy must not be modified while the context exists. FIXME: make a deep copy?
+// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
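+// A minimal usage sketch (illustrative only; assumes a *Policy obtained from a
+// parsed policy.json or from the constructors in policy_config.go):
+//
+//	pc, err := NewPolicyContext(policy)
+//	if err != nil {
+//		return err
+//	}
+//	defer pc.Destroy()
+//	// pc can now be passed to IsRunningImageAllowed / GetSignaturesWithAcceptedAuthor.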
+func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
+	pc := &PolicyContext{Policy: policy, state: pcInitializing}
+	// FIXME: initialize
+	if err := pc.changeState(pcInitializing, pcReady); err != nil {
+		// Huh?! This should never fail, we didn't give the pointer to anybody.
+		// Just give up and leave unclean state around.
+		return nil, err
+	}
+	return pc, nil
+}
+
+// Destroy should be called when the user of the context is done with it.
+func (pc *PolicyContext) Destroy() error {
+	if err := pc.changeState(pcReady, pcDestroying); err != nil {
+		return err
+	}
+	// FIXME: destroy
+	return pc.changeState(pcDestroying, pcDestroyed)
+}
+
+// policyIdentityLogName returns a string description of the image identity for policy purposes.
+// ONLY use this for log messages, not for any decisions!
+func policyIdentityLogName(ref types.ImageReference) string {
+	return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
+}
+
+// requirementsForImageRef selects the appropriate requirements for ref.
+func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
+	// Do we have a PolicyTransportScopes for this transport?
+	transportName := ref.Transport().Name()
+	if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
+		// Look for a full match.
+		identity := ref.PolicyConfigurationIdentity()
+		if req, ok := transportScopes[identity]; ok {
+			logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
+			return req
+		}
+
+		// Look for a match of the possible parent namespaces.
+		for _, name := range ref.PolicyConfigurationNamespaces() {
+			if req, ok := transportScopes[name]; ok {
+				logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
+				return req
+			}
+		}
+
+		// Look for a default match for the transport.
+		if req, ok := transportScopes[""]; ok {
+			logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
+			return req
+		}
+	}
+
+	logrus.Debugf(" Using default policy section")
+	return pc.Policy.Default
+}
+
+// GetSignaturesWithAcceptedAuthor returns those signatures from an image
+// for which the policy accepts the author (and which have been successfully
+// verified).
+// NOTE: This may legitimately return an empty list and no error, if the image
+// has no signatures or only invalid signatures.
+// WARNING: This makes the signature contents acceptable for further processing,
+// but it does not necessarily mean that the contents of the signature are
+// consistent with local policy.
+// For example:
+// - Do not use the existence of an accepted signature to determine whether to run
+//   a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+//   signature are authorized to run code as root, or to affect system or cluster configuration.
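+// Illustrative call (hedged sketch; "img" is assumed to be a types.UnparsedImage
+// obtained elsewhere):
+//
+//	sigs, err := pc.GetSignaturesWithAcceptedAuthor(ctx, img)
+//	// Per the NOTE above, len(sigs) == 0 with err == nil is a legitimate result.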
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, image types.UnparsedImage) (sigs []*Signature, finalErr error) {
+	if err := pc.changeState(pcReady, pcInUse); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err := pc.changeState(pcInUse, pcReady); err != nil {
+			sigs = nil
+			finalErr = err
+		}
+	}()
+
+	logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
+	reqs := pc.requirementsForImageRef(image.Reference())
+
+	// FIXME: rename Signatures to UnverifiedSignatures
+	unverifiedSignatures, err := image.Signatures(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]*Signature, 0, len(unverifiedSignatures))
+	for sigNumber, sig := range unverifiedSignatures {
+		var acceptedSig *Signature // non-nil if accepted
+		rejected := false
+		// FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
+		logrus.Debugf("Evaluating signature %d:", sigNumber)
+	interpretingReqs:
+		for reqNumber, req := range reqs {
+			// FIXME: Log the requirement itself? For now, we use just the number.
+			// FIXME: supply state
+			switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res {
+			case sarAccepted:
+				if as == nil { // Coverage: this should never happen
+					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
+					rejected = true
+					break interpretingReqs
+				}
+				logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
+				if acceptedSig == nil {
+					acceptedSig = as
+				} else if *as != *acceptedSig { // Coverage: this should never happen
+					// Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
+					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber)
+					rejected = true
+					acceptedSig = nil
+					break interpretingReqs
+				}
+			case sarRejected:
+				logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error())
+				rejected = true
+				break interpretingReqs
+			case sarUnknown:
+				if err != nil { // Coverage: this should never happen
+					logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error())
+					rejected = true
+					break interpretingReqs
+				}
+				logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber)
+			default: // Coverage: this should never happen
+				logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res))
+				rejected = true
+				break interpretingReqs
+			}
+		}
+		// This also handles the (invalid) case of empty reqs, by rejecting the signature.
+		if acceptedSig != nil && !rejected {
+			logrus.Debugf(" Overall: OK, signature accepted")
+			res = append(res, acceptedSig)
+		} else {
+			logrus.Debugf(" Overall: Signature not accepted")
+		}
+	}
+	return res, nil
+}
+
+// IsRunningImageAllowed returns true iff the policy allows running the image.
+// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+// succeeded but the result was rejection.
+// WARNING: This validates signatures and the manifest, but does not download or validate the
+// layers. Users must validate that the layers match their expected digests.
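+// Illustrative call (hedged sketch; pc and img as in the examples above):
+//
+//	allowed, err := pc.IsRunningImageAllowed(ctx, img)
+//	if !allowed {
+//		// err is non-nil here, and is a PolicyRequirementError if the
+//		// policy itself rejected the image.
+//	}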
+func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (res bool, finalErr error) {
+	if err := pc.changeState(pcReady, pcInUse); err != nil {
+		return false, err
+	}
+	defer func() {
+		if err := pc.changeState(pcInUse, pcReady); err != nil {
+			res = false
+			finalErr = err
+		}
+	}()
+
+	logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference()))
+	reqs := pc.requirementsForImageRef(image.Reference())
+
+	if len(reqs) == 0 {
+		return false, PolicyRequirementError("List of verification policy requirements must not be empty")
+	}
+
+	for reqNumber, req := range reqs {
+		// FIXME: supply state
+		allowed, err := req.isRunningImageAllowed(ctx, image)
+		if !allowed {
+			logrus.Debugf("Requirement %d: denied, done", reqNumber)
+			return false, err
+		}
+		logrus.Debugf(" Requirement %d: allowed", reqNumber)
+	}
+	// We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image.
+	logrus.Debugf("Overall: allowed")
+	return true, nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
new file mode 100644
index 000000000..55cdd3054
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
@@ -0,0 +1,20 @@
+// Policy evaluation for prSignedBaseLayer.
+
+package signature
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/types"
+	"github.com/sirupsen/logrus"
+)
+
+func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+	return sarUnknown, nil, nil
+}
+
+func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+	// FIXME? Reject this at policy parsing time already?
+	logrus.Errorf("signedBaseLayer not implemented yet!")
+	return false, PolicyRequirementError("signedBaseLayer not implemented yet!")
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
new file mode 100644
index 000000000..26cca4759
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
@@ -0,0 +1,130 @@
+// Policy evaluation for prSignedBy.
+
+package signature
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"strings"
+
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+	switch pr.KeyType {
+	case SBKeyTypeGPGKeys:
+	case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
+		// FIXME? Reject this at policy parsing time already?
+		return sarRejected, nil, errors.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
+	default:
+		// This should never happen, newPRSignedBy ensures KeyType.IsValid()
+		return sarRejected, nil, errors.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
+	}
+
+	if pr.KeyPath != "" && pr.KeyData != nil {
+		return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
+	}
+	// FIXME: move this to per-context initialization
+	var data []byte
+	if pr.KeyData != nil {
+		data = pr.KeyData
+	} else {
+		d, err := ioutil.ReadFile(pr.KeyPath)
+		if err != nil {
+			return sarRejected, nil, err
+		}
+		data = d
+	}
+
+	// FIXME: move this to per-context initialization
+	mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
+	if err != nil {
+		return sarRejected, nil, err
+	}
+	defer mech.Close()
+	if len(trustedIdentities) == 0 {
+		return sarRejected, nil, PolicyRequirementError("No public keys imported")
+	}
+
+	signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
+		validateKeyIdentity: func(keyIdentity string) error {
+			for _, trustedIdentity := range trustedIdentities {
+				if keyIdentity == trustedIdentity {
+					return nil
+				}
+			}
+			// Coverage: We use a private GPG home directory and only import trusted keys, so this should
+			// not be reachable.
+			return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
+		},
+		validateSignedDockerReference: func(ref string) error {
+			if !pr.SignedIdentity.matchesDockerReference(image, ref) {
+				return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
+			}
+			return nil
+		},
+		validateSignedDockerManifestDigest: func(digest digest.Digest) error {
+			m, _, err := image.Manifest(ctx)
+			if err != nil {
+				return err
+			}
+			digestMatches, err := manifest.MatchesDigest(m, digest)
+			if err != nil {
+				return err
+			}
+			if !digestMatches {
+				return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
+			}
+			return nil
+		},
+	})
+	if err != nil {
+		return sarRejected, nil, err
+	}
+
+	return sarAccepted, signature, nil
+}
+
+func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+	sigs, err := image.Signatures(ctx)
+	if err != nil {
+		return false, err
+	}
+	var rejections []error
+	for _, s := range sigs {
+		var reason error
+		switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res {
+		case sarAccepted:
+			// One accepted signature is enough.
+			return true, nil
+		case sarRejected:
+			reason = err
+		case sarUnknown:
+			// Huh?! This should not happen at all; treat it as any other invalid value.
+			fallthrough
+		default:
+			reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
+		}
+		rejections = append(rejections, reason)
+	}
+	var summary error
+	switch len(rejections) {
+	case 0:
+		summary = PolicyRequirementError("A signature was required, but no signature exists")
+	case 1:
+		summary = rejections[0]
+	default:
+		var msgs []string
+		for _, e := range rejections {
+			msgs = append(msgs, e.Error())
+		}
+		summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
+			strings.Join(msgs, "; ")))
+	}
+	return false, summary
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
new file mode 100644
index 000000000..f949088b5
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
@@ -0,0 +1,29 @@
+// Policy evaluation for the various simple PolicyRequirement types.
+
+package signature
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+)
+
+func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+	// prInsecureAcceptAnything semantics: Every image is allowed to run,
+	// but this does not consider the signature as verified.
+	return sarUnknown, nil, nil
+}
+
+func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+	return true, nil
+}
+
+func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+	return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
+}
+
+func (pr *prReject) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+	return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
new file mode 100644
index 000000000..a148ede52
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
@@ -0,0 +1,101 @@
+// PolicyReferenceMatch implementations.
+
+package signature
+
+import (
+	"fmt"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+)
+
+// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
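+// For illustration (hypothetical value): given s2 = "busybox:latest",
+// reference.ParseNormalizedNamed normalizes it to
+// "docker.io/library/busybox:latest", so the matchers below always compare
+// fully qualified names.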
+func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+	r1 := image.Reference().DockerReference()
+	if r1 == nil {
+		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
+			transports.ImageName(image.Reference())))
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(signature) {
+		return false
+	}
+	switch intended.(type) {
+	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
+		return signature.String() == intended.String()
+	case reference.Canonical:
+		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, via UnparsedImage.Manifest.
+		// Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
+		// we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms)
+		return signature.Name() == intended.Name()
+	default: // !reference.IsNameOnly(intended)
+		return false
+	}
+}
+
+func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	return signature.Name() == intended.Name()
+}
+
+// parseDockerReferences converts two reference strings into parsed entities, failing on any error
+func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
+	r1, err := reference.ParseNormalizedNamed(s1)
+	if err != nil {
+		return nil, nil, err
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	return signature.Name() == intended.Name()
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go
new file mode 100644
index 000000000..d3b33bb7a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_types.go
@@ -0,0 +1,152 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// This defines types used to represent a signature verification policy in memory.
+// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
+// built using the constructor functions provided in policy_config.go.
+
+package signature
+
+// NOTE: Keep this in sync with docs/containers-policy.json.5.md!
+
+// Policy defines requirements for considering a signature, or an image, valid.
+type Policy struct {
+	// Default applies to any image which does not have a matching policy in Transports.
+	// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
+	// if the image matches none of the scopes.
+	Default    PolicyRequirements               `json:"default"`
+	Transports map[string]PolicyTransportScopes `json:"transports"`
+}
+
+// PolicyTransportScopes defines policies for images for a specific transport,
+// for various scopes, the map keys.
+// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
+// there is one scope precisely matching a single image, and namespace scopes as prefixes
+// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
+// The empty scope, if it exists, is considered a parent namespace of all other scopes.
+// The most specific scope wins, duplication is prohibited (hard failure).
+type PolicyTransportScopes map[string]PolicyRequirements
+
+// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
+// Must not be empty, frequently will only contain a single element.
+type PolicyRequirements []PolicyRequirement
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+
+// prCommon is the common type field in a JSON encoding of PolicyRequirement.
+type prCommon struct {
+	Type prTypeIdentifier `json:"type"`
+}
+
+// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
+type prTypeIdentifier string
+
+const (
+	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
+	prTypeReject                 prTypeIdentifier = "reject"
+	prTypeSignedBy               prTypeIdentifier = "signedBy"
+	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
+)
+
+// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
+// every image is allowed to run.
+// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
+// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
+// FIXME? Better name?
+type prInsecureAcceptAnything struct {
+	prCommon
+}
+
+// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
+type prReject struct {
+	prCommon
+}
+
+// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
+type prSignedBy struct {
+	prCommon
+
+	// KeyType specifies what kind of key reference KeyPath/KeyData is.
+	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
+	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
+	KeyType sbKeyType `json:"keyType"`
+
+	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
+	KeyPath string `json:"keyPath,omitempty"`
+	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+	KeyData []byte `json:"keyData,omitempty"`
+
+	// SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "match-exact" if not specified.
+	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// sbKeyType are the allowed values for prSignedBy.KeyType
+type sbKeyType string
+
+const (
+	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
+	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
+	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
+	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
+	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
+	// FIXME: PEM, DER?
+	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
+	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
+	// FIXME: PEM, DER?
+	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
+)
+
+// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
+type prSignedBaseLayer struct {
+	prCommon
+	// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
+	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+
+// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
+type prmCommon struct {
+	Type prmTypeIdentifier `json:"type"`
+}
+
+// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
+type prmTypeIdentifier string
+
+const (
+	prmTypeMatchExact             prmTypeIdentifier = "matchExact"
+	prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
+	prmTypeMatchRepository        prmTypeIdentifier = "matchRepository"
+	prmTypeExactReference         prmTypeIdentifier = "exactReference"
+	prmTypeExactRepository        prmTypeIdentifier = "exactRepository"
+)
+
+// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
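+// For illustration only (the key path is hypothetical), a signedBy requirement
+// in policy.json using this matcher would look like:
+//
+//	{"type": "signedBy", "keyType": "GPGKeys", "keyPath": "/path/to/pubkey.gpg",
+//	 "signedIdentity": {"type": "matchExact"}}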
+type prmMatchExact struct {
+	prmCommon
+}
+
+// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
+// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest
+type prmMatchRepoDigestOrExact struct {
+	prmCommon
+}
+
+// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
+type prmMatchRepository struct {
+	prmCommon
+}
+
+// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
+type prmExactReference struct {
+	prmCommon
+	DockerReference string `json:"dockerReference"`
+}
+
+// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
+type prmExactRepository struct {
+	prmCommon
+	DockerRepository string `json:"dockerRepository"`
+}
diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/signature.go
new file mode 100644
index 000000000..44e70b3b9
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/signature.go
@@ -0,0 +1,279 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
+
+package signature
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/containers/image/v5/version"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+const (
+	signatureType = "atomic container signature"
+)
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+type InvalidSignatureError struct {
+	msg string
+}
+
+func (err InvalidSignatureError) Error() string {
+	return err.msg
+}
+
+// Signature is the parsed content of a signature.
+// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
+type Signature struct {
+	DockerManifestDigest digest.Digest
+	DockerReference      string // FIXME: more precise type?
+}
+
+// untrustedSignature is the parsed content of a signature.
+type untrustedSignature struct {
+	UntrustedDockerManifestDigest digest.Digest
+	UntrustedDockerReference      string // FIXME: more precise type?
+	UntrustedCreatorID            *string
+	// This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision,
+	// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+	// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+	// we would add another field, UntrustedTimestampNS int64.
+	UntrustedTimestamp *int64
+}
+
+// UntrustedSignatureInformation is information available in an untrusted signature.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”)
+type UntrustedSignatureInformation struct {
+	UntrustedDockerManifestDigest digest.Digest
+	UntrustedDockerReference      string // FIXME: more precise type?
+	UntrustedCreatorID            *string
+	UntrustedTimestamp            *time.Time
+	UntrustedShortKeyIdentifier   string
+}
+
+// newUntrustedSignature returns an untrustedSignature object with
+// the specified primary contents and appropriate metadata.
+func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature {
+	// Use intermediate variables for these values so that we can take their addresses.
+	// Golang guarantees that they will have a new address on every execution.
+	creatorID := "atomic " + version.Version
+	timestamp := time.Now().Unix()
+	return untrustedSignature{
+		UntrustedDockerManifestDigest: dockerManifestDigest,
+		UntrustedDockerReference:      dockerReference,
+		UntrustedCreatorID:            &creatorID,
+		UntrustedTimestamp:            &timestamp,
+	}
+}
+
+// Compile-time check that untrustedSignature implements json.Marshaler
+var _ json.Marshaler = (*untrustedSignature)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s untrustedSignature) MarshalJSON() ([]byte, error) {
+	if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
+		return nil, errors.New("Unexpected empty signature content")
+	}
+	critical := map[string]interface{}{
+		"type":     signatureType,
+		"image":    map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
+		"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
+	}
+	optional := map[string]interface{}{}
+	if s.UntrustedCreatorID != nil {
+		optional["creator"] = *s.UntrustedCreatorID
+	}
+	if s.UntrustedTimestamp != nil {
+		optional["timestamp"] = *s.UntrustedTimestamp
+	}
+	signature := map[string]interface{}{
+		"critical": critical,
+		"optional": optional,
+	}
+	return json.Marshal(signature)
+}
+
+// Compile-time check that untrustedSignature implements json.Unmarshaler
+var _ json.Unmarshaler = (*untrustedSignature)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
+	err := s.strictUnmarshalJSON(data)
+	if err != nil {
+		if _, ok := err.(jsonFormatError); ok {
+			err = InvalidSignatureError{msg: err.Error()}
+		}
+	}
+	return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
+// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError conversion in a single place, the caller.
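+// For reference, the JSON produced by MarshalJSON above (and parsed here) looks
+// like the following, with illustrative (elided/hypothetical) values:
+//
+//	{"critical": {"type": "atomic container signature",
+//	              "image": {"docker-manifest-digest": "sha256:…"},
+//	              "identity": {"docker-reference": "example.com/ns/app:latest"}},
+//	 "optional": {"creator": "atomic <version>", "timestamp": 1571234567}}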
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
+	var critical, optional json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"critical": &critical,
+		"optional": &optional,
+	}); err != nil {
+		return err
+	}
+
+	var creatorID string
+	var timestamp float64
+	var gotCreatorID, gotTimestamp = false, false
+	if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
+		switch key {
+		case "creator":
+			gotCreatorID = true
+			return &creatorID
+		case "timestamp":
+			gotTimestamp = true
+			return &timestamp
+		default:
+			var ignore interface{}
+			return &ignore
+		}
+	}); err != nil {
+		return err
+	}
+	if gotCreatorID {
+		s.UntrustedCreatorID = &creatorID
+	}
+	if gotTimestamp {
+		intTimestamp := int64(timestamp)
+		if float64(intTimestamp) != timestamp {
+			return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"}
+		}
+		s.UntrustedTimestamp = &intTimestamp
+	}
+
+	var t string
+	var image, identity json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
+		"type":     &t,
+		"image":    &image,
+		"identity": &identity,
+	}); err != nil {
+		return err
+	}
+	if t != signatureType {
+		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
+	}
+
+	var digestString string
+	if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
+		"docker-manifest-digest": &digestString,
+	}); err != nil {
+		return err
+	}
+	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
+
+	return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+		"docker-reference": &s.UntrustedDockerReference,
+	})
+}
+
+// Sign formats the signature and returns a blob signed using mech and keyIdentity
+// (If it seems surprising that this is a method on untrustedSignature, note that there
+// isn’t a good reason to think that a key used by the user is trusted by any component
+// of the system just because it is a private key — actually the presence of a private key
+// on the system increases the likelihood of a successful attack on that private key
+// on that particular system.)
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
+	json, err := json.Marshal(s)
+	if err != nil {
+		return nil, err
+	}
+
+	return mech.Sign(json, keyIdentity)
+}
+
+// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
+// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
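+// Illustrative construction (hedged sketch; the validators shown are trivial
+// stand-ins, real callers supply policy-specific checks as prSignedBy does):
+//
+//	rules := signatureAcceptanceRules{
+//		validateKeyIdentity:                func(key string) error { return nil },
+//		validateSignedDockerReference:      func(ref string) error { return nil },
+//		validateSignedDockerManifestDigest: func(d digest.Digest) error { return nil },
+//	}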
+type signatureAcceptanceRules struct {
+	validateKeyIdentity                func(string) error
+	validateSignedDockerReference      func(string) error
+	validateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
+// match expected values, both as specified by rules, and returns it
+func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
+	signed, keyIdentity, err := mech.Verify(unverifiedSignature)
+	if err != nil {
+		return nil, err
+	}
+	if err := rules.validateKeyIdentity(keyIdentity); err != nil {
+		return nil, err
+	}
+
+	var unmatchedSignature untrustedSignature
+	if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
+		return nil, InvalidSignatureError{msg: err.Error()}
+	}
+	if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
+		return nil, err
+	}
+	if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
+		return nil, err
+	}
+	// signatureAcceptanceRules have accepted this value.
+	return &Signature{
+		DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
+		DockerReference:      unmatchedSignature.UntrustedDockerReference,
+	}, nil
+}
+
+// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
+// WITHOUT doing any cryptographic verification.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”)
+func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
+	// NOTE: This should eventually do format autodetection.
+	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+	if err != nil {
+		return nil, err
+	}
+	defer mech.Close()
+
+	untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
+	if err != nil {
+		return nil, err
+	}
+	var untrustedDecodedContents untrustedSignature
+	if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
+		return nil, InvalidSignatureError{msg: err.Error()}
+	}
+
+	var timestamp *time.Time // = nil
+	if untrustedDecodedContents.UntrustedTimestamp != nil {
+		ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
+		timestamp = &ts
+	}
+	return &UntrustedSignatureInformation{
+		UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
+		UntrustedDockerReference:      untrustedDecodedContents.UntrustedDockerReference,
+		UntrustedCreatorID:            untrustedDecodedContents.UntrustedCreatorID,
+		UntrustedTimestamp:            timestamp,
+		UntrustedShortKeyIdentifier:   shortKeyIdentifier,
+	}, nil
+}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go
new file mode 100644
index 000000000..2b89f329f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/storage/storage_image.go
@@ -0,0 +1,1039 @@
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	stderrors "errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"sync/atomic"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/internal/tmpdir"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/blobinfocache/none"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/ioutils"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
+	// with a digest-based name that doesn't match its contents.
+	ErrBlobDigestMismatch = stderrors.New("blob digest mismatch")
+	// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
+	// with an expected size that doesn't match the reader.
+	ErrBlobSizeMismatch = stderrors.New("blob size mismatch")
+	// ErrNoSuchImage is returned when we attempt to access an image which
+	// doesn't exist in the storage area.
+	ErrNoSuchImage = storage.ErrNotAnImage
+)
+
+type storageImageSource struct {
+	imageRef        storageReference
+	image           *storage.Image
+	layerPosition   map[digest.Digest]int   // Where we are in reading a blob's layers
+	cachedManifest  []byte                  // A cached copy of the manifest, if already known, or nil
+	getBlobMutex    sync.Mutex              // Mutex to sync state for parallel GetBlob executions
+	SignatureSizes  []int                   `json:"signature-sizes,omitempty"`  // List of sizes of each signature slice
+	SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
+}
+
+type storageImageDestination struct {
+	imageRef        storageReference
+	directory       string                          // Temporary directory where we store blobs until Commit() time
+	nextTempFileID  int32                           // A counter that we use for computing filenames to assign to blobs
+	manifest        []byte                          // Manifest contents, temporary
+	signatures      []byte                          // Signature contents, temporary
+	signatureses    map[digest.Digest][]byte        // Instance signature contents, temporary
+	putBlobMutex    sync.Mutex                      // Mutex to sync state for parallel PutBlob executions
+	blobDiffIDs     map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
+	fileSizes       map[digest.Digest]int64         // Mapping from layer blobsums to their sizes
+	filenames       map[digest.Digest]string        // Mapping from layer blobsums to names of files we used to hold them
+	SignatureSizes  []int                   `json:"signature-sizes,omitempty"`  // List of sizes of each signature slice
+	SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
+}
+
+type storageImageCloser struct {
+	types.ImageCloser
+	size int64
+}
+
+// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
+// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
+func manifestBigDataKey(digest digest.Digest) string {
+	return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
+}
+
+// signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably.
+func signatureBigDataKey(digest digest.Digest) string {
+	return "signature-" + digest.Encoded()
+}
+
+// newImageSource sets up an image for reading.
+func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
+	// First, locate the image.
+	img, err := imageRef.resolveImage(sys)
+	if err != nil {
+		return nil, err
+	}
+
+	// Build the reader object.
+	image := &storageImageSource{
+		imageRef:        imageRef,
+		image:           img,
+		layerPosition:   make(map[digest.Digest]int),
+		SignatureSizes:  []int{},
+		SignaturesSizes: make(map[digest.Digest][]int),
+	}
+	if img.Metadata != "" {
+		if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
+			return nil, errors.Wrap(err, "error decoding metadata for source image")
+		}
+	}
+	return image, nil
+}
+
+// Reference returns the image reference that we used to find this image.
+func (s *storageImageSource) Reference() types.ImageReference {
+	return s.imageRef
+}
+
+// Close cleans up any resources we tied up while reading the image.
+func (s *storageImageSource) Close() error {
+	return nil
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *storageImageSource) HasThreadSafeGetBlob() bool {
+	return true
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
+	if info.Digest == image.GzippedEmptyLayerDigest {
+		return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+	}
+	rc, n, _, err = s.getBlobAndLayerID(info)
+	return rc, n, err
+}
+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
+	var layer storage.Layer
+	var diffOptions *storage.DiffOptions
+	// We need a valid digest value.
+	err = info.Digest.Validate()
+	if err != nil {
+		return nil, -1, "", err
+	}
+	// Check if the blob corresponds to a diff that was used to initialize any layers. Our
+	// callers should try to retrieve layers using their uncompressed digests, so no need to
+	// check if they're using one of the compressed digests, which we can't reproduce anyway.
+	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
+	// If it's not a layer, then it must be a data item.
+	if len(layers) == 0 {
+		b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
+		if err != nil {
+			return nil, -1, "", err
+		}
+		r := bytes.NewReader(b)
+		logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
+		return ioutil.NopCloser(r), int64(r.Len()), "", nil
+	}
+	// Step through the list of matching layers. Tests may want to verify that if we have multiple layers
+	// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
+	// just go ahead and use the first one every time.
+	s.getBlobMutex.Lock()
+	i := s.layerPosition[info.Digest]
+	s.layerPosition[info.Digest] = i + 1
+	s.getBlobMutex.Unlock()
+	if len(layers) > 0 {
+		layer = layers[i%len(layers)]
+	}
+	// Force the storage layer to not try to match any compression that was used when the layer was first
+	// handed to it.
+	noCompression := archive.Uncompressed
+	diffOptions = &storage.DiffOptions{
+		Compression: &noCompression,
+	}
+	if layer.UncompressedSize < 0 {
+		n = -1
+	} else {
+		n = layer.UncompressedSize
+	}
+	logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
+	rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
+	if err != nil {
+		return nil, -1, "", err
+	}
+	return rc, n, layer.ID, err
+}
+
+// GetManifest() reads the image's manifest.
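+// Per-digest manifests are stored under the key produced by manifestBigDataKey
+// above; assuming storage.ImageDigestManifestBigDataNamePrefix is "manifest",
+// a key looks like "manifest-sha256:…" (digest value elided here). The lookup
+// below prefers that key and falls back to storage.ImageDigestBigDataKey.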
+func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
+	if instanceDigest != nil {
+		key := manifestBigDataKey(*instanceDigest)
+		blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+		if err != nil {
+			return nil, "", errors.Wrapf(err, "error reading manifest for image instance %q", *instanceDigest)
+		}
+		return blob, manifest.GuessMIMEType(blob), err
+	}
+	if len(s.cachedManifest) == 0 {
+		// The manifest is stored as a big data item.
+		// Prefer the manifest corresponding to the user-specified digest, if available.
+		if s.imageRef.named != nil {
+			if digested, ok := s.imageRef.named.(reference.Digested); ok {
+				key := manifestBigDataKey(digested.Digest())
+				blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+				if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
+					return nil, "", err
+				}
+				if err == nil {
+					s.cachedManifest = blob
+				}
+			}
+		}
+		// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
+		// Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
+		if len(s.cachedManifest) == 0 {
+			cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
+			if err != nil {
+				return nil, "", err
+			}
+			s.cachedManifest = cachedBlob
+		}
+	}
+	return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
+}
+
+// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
+// the image, after they've been decompressed.
+func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+	manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID)
+	}
+	if manifest.MIMETypeIsMultiImage(manifestType) {
+		return nil, errors.Errorf("can't copy layers for a manifest list (shouldn't be attempted)")
+	}
+	man, err := manifest.FromBlob(manifestBlob, manifestType)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID)
+	}
+
+	uncompressedLayerType := ""
+	switch manifestType {
+	case imgspecv1.MediaTypeImageManifest:
+		uncompressedLayerType = imgspecv1.MediaTypeImageLayer
+	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
+		// This is actually a compressed type, but there's no uncompressed type defined
+		uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType
+	}
+
+	physicalBlobInfos := []types.BlobInfo{}
+	layerID := s.image.TopLayer
+	for layerID != "" {
+		layer, err := s.imageRef.transport.store.Layer(layerID)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error reading layer %q in image %q", layerID, s.image.ID)
+		}
+		if layer.UncompressedDigest == "" {
+			return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID)
+		}
+		if layer.UncompressedSize < 0 {
+			return nil, errors.Errorf("uncompressed size for layer %q is unknown", layerID)
+		}
+		blobInfo := types.BlobInfo{
+			Digest:    layer.UncompressedDigest,
+			Size:      layer.UncompressedSize,
+			MediaType: uncompressedLayerType,
+		}
+		physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
+		layerID = layer.Parent
+	}
+
+	res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating LayerInfosForCopy of image %q", s.image.ID)
+	}
+	return res, nil
+}
+
+// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest,
+// but using layer data which we can actually produce — physicalInfos for non-empty layers,
+// and image.GzippedEmptyLayer for empty ones.
+// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.)
+func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
+	nextPhysical := 0
+	res := make([]types.BlobInfo, len(manifestInfos))
+	for i, mi := range manifestInfos {
+		if mi.EmptyLayer {
+			res[i] = types.BlobInfo{
+				Digest:    image.GzippedEmptyLayerDigest,
+				Size:      int64(len(image.GzippedEmptyLayer)),
+				MediaType: mi.MediaType,
+			}
+		} else {
+			if nextPhysical >= len(physicalInfos) {
+				return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
+			}
+			res[i] = physicalInfos[nextPhysical]
+			nextPhysical++
+		}
+	}
+	if nextPhysical != len(physicalInfos) {
+		return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
+	}
+	return res, nil
+}
+
+// GetSignatures() parses the image's signatures blob into a slice of byte slices.
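+// The signatures for one manifest are stored as a single concatenated blob,
+// with SignatureSizes (or SignaturesSizes[instance]) recording the length of
+// each element. For illustration, sizes [100, 250] split a 350-byte blob into
+// signature[0:100] and signature[100:350], which is what the loop below does.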
+func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
+	var offset int
+	sigslice := [][]byte{}
+	signature := []byte{}
+	signatureSizes := s.SignatureSizes
+	key := "signatures"
+	instance := "default instance"
+	if instanceDigest != nil {
+		signatureSizes = s.SignaturesSizes[*instanceDigest]
+		key = signatureBigDataKey(*instanceDigest)
+		instance = instanceDigest.Encoded()
+	}
+	if len(signatureSizes) > 0 {
+		signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error looking up signatures data for image %q (%s)", s.image.ID, instance)
+		}
+		signature = signatureBlob
+	}
+	for _, length := range signatureSizes {
+		if offset+length > len(signature) {
+			return nil, errors.Errorf("error looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, offset+length, len(signature))
+		}
+		sigslice = append(sigslice, signature[offset:offset+length])
+		offset += length
+	}
+	if offset != len(signature) {
+		return nil, errors.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signature)-offset)
+	}
+	return sigslice, nil
+}
+
+// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
+// it's time to Commit() the image
+func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
+	directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "storage")
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating a temporary directory")
+	}
+	image := &storageImageDestination{
+		imageRef:        imageRef,
+		directory:       directory,
+		signatureses:    make(map[digest.Digest][]byte),
+		blobDiffIDs:     make(map[digest.Digest]digest.Digest),
+		fileSizes:       make(map[digest.Digest]int64),
+		filenames:       make(map[digest.Digest]string),
+		SignatureSizes:  []int{},
+		SignaturesSizes: make(map[digest.Digest][]int),
+	}
+	return image, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (s *storageImageDestination) Reference() types.ImageReference {
+	return s.imageRef
+}
+
+// Close cleans up the temporary directory.
+func (s *storageImageDestination) Close() error {
+	return os.RemoveAll(s.directory)
+}
+
+func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
+	// We ultimately have to decompress layers to populate trees on disk
+	// and need to explicitly ask for it here, so that the layers' MIME
+	// types can be set accordingly.
+	return types.PreserveOriginal
+}
+
+func (s *storageImageDestination) computeNextBlobCacheFile() string {
+	return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (s *storageImageDestination) HasThreadSafePutBlob() bool {
+	return true
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly.  Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	// Stores a layer or data blob in our temporary directory, checking that any information
+	// in the blobinfo matches the incoming data.
+	errorBlobInfo := types.BlobInfo{
+		Digest: "",
+		Size:   -1,
+	}
+	// Set up to digest the blob and count its size while saving it to a file.
+	hasher := digest.Canonical.Digester()
+	if blobinfo.Digest.Validate() == nil {
+		if a := blobinfo.Digest.Algorithm(); a.Available() {
+			hasher = a.Digester()
+		}
+	}
+	diffID := digest.Canonical.Digester()
+	filename := s.computeNextBlobCacheFile()
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+	if err != nil {
+		return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename)
+	}
+	defer file.Close()
+	counter := ioutils.NewWriteCounter(hasher.Hash())
+	reader := io.TeeReader(io.TeeReader(stream, counter), file)
+	decompressed, err := archive.DecompressStream(reader)
+	if err != nil {
+		return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob")
+	}
+	// Copy the data to the file.
+	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+	_, err = io.Copy(diffID.Hash(), decompressed)
+	decompressed.Close()
+	if err != nil {
+		return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename)
+	}
+	// Ensure that any information that we were given about the blob is correct.
+	if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() {
+		return errorBlobInfo, errors.WithStack(ErrBlobDigestMismatch)
+	}
+	if blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
+		return errorBlobInfo, errors.WithStack(ErrBlobSizeMismatch)
+	}
+	// Record information about the blob.
+	s.putBlobMutex.Lock()
+	s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
+	s.fileSizes[hasher.Digest()] = counter.Count
+	s.filenames[hasher.Digest()] = filename
+	s.putBlobMutex.Unlock()
+	blobDigest := blobinfo.Digest
+	if blobDigest.Validate() != nil {
+		blobDigest = hasher.Digest()
+	}
+	blobSize := blobinfo.Size
+	if blobSize < 0 {
+		blobSize = counter.Count
+	}
+	// This is safe because we have just computed both values ourselves.
+	cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
+	return types.BlobInfo{
+		Digest:    blobDigest,
+		Size:      blobSize,
+		MediaType: blobinfo.MediaType,
+	}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	// Lock the entire method, as it executes fairly quickly.
+	s.putBlobMutex.Lock()
+	defer s.putBlobMutex.Unlock()
+	if blobinfo.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf(`Cannot check for a blob with unknown digest`)
+	}
+	if err := blobinfo.Digest.Validate(); err != nil {
+		return false, types.BlobInfo{}, errors.Wrapf(err, `Cannot check for a blob with invalid digest`)
+	}
+
+	// Check if we've already cached it in a file.
+	if size, ok := s.fileSizes[blobinfo.Digest]; ok {
+		return true, types.BlobInfo{
+			Digest:    blobinfo.Digest,
+			Size:      size,
+			MediaType: blobinfo.MediaType,
+		}, nil
+	}
+
+	// Check if we have a layer in storage whose uncompressed form matches that blob.
+	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
+	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
+	}
+	if len(layers) > 0 {
+		// Save this for completeness.
+		s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+		return true, types.BlobInfo{
+			Digest:    blobinfo.Digest,
+			Size:      layers[0].UncompressedSize,
+			MediaType: blobinfo.MediaType,
+		}, nil
+	}
+
+	// Check if we have a layer in storage whose compressed form matches that blob.
+	layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
+	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
+	}
+	if len(layers) > 0 {
+		// Record the uncompressed value so that we can use it to calculate layer IDs.
+		s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+		return true, types.BlobInfo{
+			Digest:    blobinfo.Digest,
+			Size:      layers[0].CompressedSize,
+			MediaType: blobinfo.MediaType,
+		}, nil
+	}
+
+	// Does the blob correspond to a known DiffID which we already have available?
+	// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
+	// uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size.
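+	// (Illustration: a manifest lists a gzipped layer that we never pulled, but
+	// the blob info cache knows its uncompressed digest, and a layer with that
+	// uncompressed digest already exists in the store; when substitution is
+	// allowed, we can answer with the uncompressed layer instead of fetching
+	// the compressed blob.)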
+ if canSubstitute || blobinfo.Size != -1 { + if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest) + } + if len(layers) > 0 { + if blobinfo.Size != -1 { + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, blobinfo, nil + } + if !canSubstitute { + return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo) + } + s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: uncompressedDigest, + Size: layers[0].UncompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + } + } + + // Nope, we don't have it. + return false, types.BlobInfo{}, nil +} + +// computeID computes a recommended image ID based on information we have so far. If +// the manifest is not of a type that we recognize, we return an empty value, indicating +// that since we don't have a recommendation, a random ID should be used if one needs +// to be allocated. +func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // Build the diffID list. We need the decompressed sums that we've been calculating to + // fill in the DiffIDs. It's expected (but not enforced by us) that the number of + // diffIDs corresponds to the number of non-EmptyLayer entries in the history. + var diffIDs []digest.Digest + switch m := m.(type) { + case *manifest.Schema1: + // Build a list of the diffIDs we've generated for the non-throwaway FS layers, + // in reverse of the order in which they were originally listed. + for i, compat := range m.ExtractedV1Compatibility { + if compat.ThrowAway { + continue + } + blobSum := m.FSLayers[i].BlobSum + diffID, ok := s.blobDiffIDs[blobSum] + if !ok { + logrus.Infof("error looking up diffID for layer %q", blobSum.String()) + return "" + } + diffIDs = append([]digest.Digest{diffID}, diffIDs...) + } + case *manifest.Schema2, *manifest.OCI1: + // We know the ID calculation for these formats doesn't actually use the diffIDs, + // so we don't need to populate the diffID list. + default: + return "" + } + id, err := m.ImageID(diffIDs) + if err != nil { + return "" + } + return id +} + +// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig +// information out of it for Inspect(). +func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { + if info.Digest == "" { + return nil, errors.Errorf(`no digest supplied when reading blob`) + } + if err := info.Digest.Validate(); err != nil { + return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) + } + // Assume it's a file, since we're only calling this from a place that expects to read files. + if filename, ok := s.filenames[info.Digest]; ok { + contents, err2 := ioutil.ReadFile(filename) + if err2 != nil { + return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename) + } + return contents, nil + } + // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. 
+	return nil, errors.New("blob not found")
+}
+
+func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+	if len(s.manifest) == 0 {
+		return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
+	}
+	toplevelManifest, _, err := unparsedToplevel.Manifest(ctx)
+	if err != nil {
+		return errors.Wrapf(err, "error retrieving top-level manifest")
+	}
+	// If the name we're saving to includes a digest, then check that the
+	// manifests that we're about to save all either match the one from the
+	// unparsedToplevel, or match the digest in the name that we're using.
+	if s.imageRef.named != nil {
+		if digested, ok := s.imageRef.named.(reference.Digested); ok {
+			matches, err := manifest.MatchesDigest(s.manifest, digested.Digest())
+			if err != nil {
+				return err
+			}
+			if !matches {
+				matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest())
+				if err != nil {
+					return err
+				}
+			}
+			if !matches {
+				return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest())
+			}
+		}
+	}
+	// Find the list of layer blobs.
+	man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
+	if err != nil {
+		return errors.Wrapf(err, "error parsing manifest")
+	}
+	layerBlobs := man.LayerInfos()
+	// Extract or find the layers.
+	lastLayer := ""
+	for _, blob := range layerBlobs {
+		if blob.EmptyLayer {
+			continue
+		}
+
+		// Check if there's already a layer with the ID that we'd give to the result of applying
+		// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
+		diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
+		if !haveDiffID {
+			// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
+			// or to even check if we had it.
+			// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+			// that relies on using a blob digest that has never been seen by the store had better call
+			// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
+			// so far we are going to accommodate that (if we should be doing that at all).
+			logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
+			has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
+			if err != nil {
+				return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
+			}
+			if !has {
+				return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
+			}
+			diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
+			if !haveDiffID {
+				return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
+			}
+		}
+		id := diffID.Hex()
+		if lastLayer != "" {
+			id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
+		}
+		if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
+			// There's already a layer that should have the right contents, just reuse it.
+			lastLayer = layer.ID
+			continue
+		}
+		// Check if we previously cached a file with that blob's contents.  If we didn't,
+		// then we need to read the desired contents from a layer.
+		filename, ok := s.filenames[blob.Digest]
+		if !ok {
+			// Try to find the layer with contents matching that blobsum.
+			layer := ""
+			layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
+			if err2 == nil && len(layers) > 0 {
+				layer = layers[0].ID
+			} else {
+				layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
+				if err2 == nil && len(layers) > 0 {
+					layer = layers[0].ID
+				}
+			}
+			if layer == "" {
+				if err2 == nil {
+					err2 = storage.ErrLayerUnknown
+				}
+				return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
+			}
+			// Read the layer's contents.
+			noCompression := archive.Uncompressed
+			diffOptions := &storage.DiffOptions{
+				Compression: &noCompression,
+			}
+			diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
+			if err2 != nil {
+				return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
+			}
+			// Copy the layer diff to a file.  Diff() takes a lock that it holds
+			// until the ReadCloser that it returns is closed, and PutLayer() wants
+			// the same lock, so the diff can't just be directly streamed from one
+			// to the other.
+			filename = s.computeNextBlobCacheFile()
+			file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+			if err != nil {
+				diff.Close()
+				return errors.Wrapf(err, "error creating temporary file %q", filename)
+			}
+			// Copy the data to the file.
+			// TODO: This can take quite some time, and should ideally be cancellable using
+			// ctx.Done().
+			_, err = io.Copy(file, diff)
+			diff.Close()
+			file.Close()
+			if err != nil {
+				return errors.Wrapf(err, "error storing blob to file %q", filename)
+			}
+			// Make sure that we can find this file later, should we need the layer's
+			// contents again.
+			s.filenames[blob.Digest] = filename
+		}
+		// Read the cached blob and use it as a diff.
+		file, err := os.Open(filename)
+		if err != nil {
+			return errors.Wrapf(err, "error opening file %q", filename)
+		}
+		defer file.Close()
+		// Build the new layer using the diff, regardless of where it came from.
+		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+		layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file)
+		if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
+			return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest)
+		}
+		lastLayer = layer.ID
+	}
+
+	// If one of those blobs was a configuration blob, then we can try to dig out the date when the image
+	// was originally created, in case we're just copying it.  If not, no harm done.
+	options := &storage.ImageOptions{}
+	if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil {
+		logrus.Debugf("setting image creation date to %s", inspect.Created)
+		options.CreationDate = *inspect.Created
+	}
+	// Create the image record, pointing to the most-recently added layer.
+ intendedID := s.imageRef.id + if intendedID == "" { + intendedID = s.computeID(man) + } + oldNames := []string{} + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) + if err != nil { + if errors.Cause(err) != storage.ErrDuplicateID { + logrus.Debugf("error creating image: %q", err) + return errors.Wrapf(err, "error creating image %q", intendedID) + } + img, err = s.imageRef.transport.store.Image(intendedID) + if err != nil { + return errors.Wrapf(err, "error reading image %q", intendedID) + } + if img.TopLayer != lastLayer { + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) + return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) + } + logrus.Debugf("reusing image ID %q", img.ID) + oldNames = append(oldNames, img.Names...) + } else { + logrus.Debugf("created new image ID %q", img.ID) + } + // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so + // we just need to screen out the ones that are actually layers to get the list of non-layers. + dataBlobs := make(map[digest.Digest]struct{}) + for blob := range s.filenames { + dataBlobs[blob] = struct{}{} + } + for _, layerBlob := range layerBlobs { + delete(dataBlobs, layerBlob.Digest) + } + for blob := range dataBlobs { + v, err := ioutil.ReadFile(s.filenames[blob]) + if err != nil { + return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) + return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) + } + } + // Set the reference's name on the image. We don't need to worry about avoiding duplicate + // values because SetNames() will deduplicate the list that we pass to it. + if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { + names := []string{} + if name != nil { + names = append(names, name.String()) + } + if len(oldNames) > 0 { + names = append(names, oldNames...) + } + if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) + return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) + } + logrus.Debugf("set names of image %q to %v", img.ID, names) + } + // Save the unparsedToplevel's manifest. 
+ if len(toplevelManifest) != 0 { + manifestDigest, err := manifest.Digest(toplevelManifest) + if err != nil { + return errors.Wrapf(err, "error digesting top-level manifest") + } + key := manifestBigDataKey(manifestDigest) + if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err) + return errors.Wrapf(err, "error saving top-level manifest for image %q", img.ID) + } + } + // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store. + // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, + // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers. + manifestDigest, err := manifest.Digest(s.manifest) + if err != nil { + return errors.Wrapf(err, "error computing manifest digest") + } + key := manifestBigDataKey(manifestDigest) + if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) + return errors.Wrapf(err, "error saving manifest for image %q", img.ID) + } + key = storage.ImageDigestBigDataKey + if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) + return errors.Wrapf(err, "error saving manifest for image %q", img.ID) + } + // Save the signatures, if we have any. + if len(s.signatures) > 0 { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return errors.Wrapf(err, "error saving signatures for image %q", img.ID) + } + } + for instanceDigest, signatures := range s.signatureses { + key := signatureBigDataKey(instanceDigest) + if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return errors.Wrapf(err, "error saving signatures for image %q", img.ID) + } + } + // Save our metadata. 
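+	// (json.Marshal(s) records the destination's exported fields, notably
+	// SignatureSizes and SignaturesSizes, in the image's metadata string;
+	// GetSignatures() depends on those sizes, presumably recovered when the
+	// image record is next opened as a source, to split the stored signature
+	// blobs back apart.)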
+ metadata, err := json.Marshal(s) + if err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) + return errors.Wrapf(err, "error encoding metadata for image %q", img.ID) + } + if len(metadata) != 0 { + if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) + return errors.Wrapf(err, "error saving metadata for image %q", img.ID) + } + logrus.Debugf("saved image metadata %q", string(metadata)) + } + return nil +} + +var manifestMIMETypes = []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + manifest.DockerV2Schema1SignedMediaType, + manifest.DockerV2Schema1MediaType, +} + +func (s *storageImageDestination) SupportedManifestMIMETypes() []string { + return manifestMIMETypes +} + +// PutManifest writes the manifest to the destination. +func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error { + newBlob := make([]byte, len(manifestBlob)) + copy(newBlob, manifestBlob) + s.manifest = newBlob + return nil +} + +// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was +// previously supplied to PutSignatures(). +func (s *storageImageDestination) SupportsSignatures(ctx context.Context) error { + return nil +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be +// uploaded to the image destination, true otherwise. +func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (s *storageImageDestination) MustMatchRuntimeOS() bool { + return true +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { + return true // Yes, we want the unmodified manifest +} + +// PutSignatures records the image's signatures for committing as a single data blob. 
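+// For example (illustrative), two signatures of 2 and 3 bytes are stored as
+// one 5-byte blob alongside sizes of []int{2, 3}; the packing is equivalent to:
+//
+//	sizes := []int{}
+//	blob := []byte{}
+//	for _, sig := range signatures {
+//		sizes = append(sizes, len(sig))
+//		blob = append(blob, sig...)
+//	}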
+func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + sizes := []int{} + sigblob := []byte{} + for _, sig := range signatures { + sizes = append(sizes, len(sig)) + newblob := make([]byte, len(sigblob)+len(sig)) + copy(newblob, sigblob) + copy(newblob[len(sigblob):], sig) + sigblob = newblob + } + if instanceDigest == nil { + s.signatures = sigblob + s.SignatureSizes = sizes + } + if instanceDigest == nil && len(s.manifest) > 0 { + manifestDigest, err := manifest.Digest(s.manifest) + if err != nil { + return err + } + instanceDigest = &manifestDigest + } + if instanceDigest != nil { + s.signatureses[*instanceDigest] = sigblob + s.SignaturesSizes[*instanceDigest] = sizes + } + return nil +} + +// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) getSize() (int64, error) { + var sum int64 + // Size up the data blobs. + dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID) + if err != nil { + return -1, errors.Wrapf(err, "error reading image %q", s.image.ID) + } + for _, dataName := range dataNames { + bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName) + if err != nil { + return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.image.ID) + } + sum += bigSize + } + // Add the signature sizes. + for _, sigSize := range s.SignatureSizes { + sum += int64(sigSize) + } + // Walk the layer list. + layerID := s.image.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return -1, err + } + if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) + } + sum += layer.UncompressedSize + if layer.Parent == "" { + break + } + layerID = layer.Parent + } + return sum, nil +} + +// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) Size() (int64, error) { + return s.getSize() +} + +// Size() returns the previously-computed size of the image, with no error. 
+func (s *storageImageCloser) Size() (int64, error) { + return s.size, nil +} + +// newImage creates an image that also knows its size +func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, s) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, sys, src) + if err != nil { + return nil, err + } + size, err := src.getSize() + if err != nil { + return nil, err + } + return &storageImageCloser{ImageCloser: img, size: size}, nil +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go new file mode 100644 index 000000000..4e137ad1b --- /dev/null +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -0,0 +1,299 @@ +// +build !containers_image_storage_stub + +package storage + +import ( + "context" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte +// value hex-encoded into a 64-character string, and a reference to a Store +// where an image is, or would be, kept. +// Either "named" or "id" must be set. +type storageReference struct { + transport storageTransport + named reference.Named // may include a tag and/or a digest + id string +} + +func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) { + if named == nil && id == "" { + return nil, ErrInvalidReference + } + // We take a copy of the transport, which contains a pointer to the + // store that it used for resolving this reference, so that the + // transport that we'll return from Transport() won't be affected by + // further calls to the original transport's SetStore() method. + return &storageReference{ + transport: transport, + named: named, + id: id, + }, nil +} + +// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref +func imageMatchesRepo(image *storage.Image, ref reference.Named) bool { + repo := ref.Name() + for _, name := range image.Names { + if named, err := reference.ParseNormalizedNamed(name); err == nil { + if named.Name() == repo { + return true + } + } + } + return false +} + +// imageMatchesSystemContext checks if the passed-in image both contains a +// manifest that matches the passed-in digest, and identifies itself as being +// appropriate for running on the system that matches sys. +// If we somehow ended up sharing the same storage among multiple types of +// systems, and managed to download multiple images from the same manifest +// list, their image records will all contain copies of the manifest list, and +// this check will help us decide which of them we want to return when we've +// been asked to resolve an image reference that uses the list's digest to a +// specific image ID. +func imageMatchesSystemContext(store storage.Store, img *storage.Image, manifestDigest digest.Digest, sys *types.SystemContext) bool { + // First, check if the image record has a manifest that matches the + // specified digest. 
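+	// (manifestBigDataKey derives a per-digest big-data name for the manifest,
+	// which is how a single image record can carry several manifest instances
+	// keyed by their digests; see also the Commit() logic in storage_image.go.)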
+	key := manifestBigDataKey(manifestDigest)
+	manifestBytes, err := store.ImageBigData(img.ID, key)
+	if err != nil {
+		return false
+	}
+	// The manifest is either a list, or not a list.  If it's a list, find
+	// the digest of the instance that matches the current system, and try
+	// to load that manifest from the image record, and use it.
+	manifestType := manifest.GuessMIMEType(manifestBytes)
+	if manifest.MIMETypeIsMultiImage(manifestType) {
+		list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+		if err != nil {
+			return false
+		}
+		manifestDigest, err = list.ChooseInstance(sys)
+		if err != nil {
+			return false
+		}
+		key = manifestBigDataKey(manifestDigest)
+		manifestBytes, err = store.ImageBigData(img.ID, key)
+		if err != nil {
+			return false
+		}
+		manifestType = manifest.GuessMIMEType(manifestBytes)
+	}
+	// Load the image's configuration blob.
+	m, err := manifest.FromBlob(manifestBytes, manifestType)
+	if err != nil {
+		return false
+	}
+	getConfig := func(blobInfo types.BlobInfo) ([]byte, error) {
+		return store.ImageBigData(img.ID, blobInfo.Digest.String())
+	}
+	ii, err := m.Inspect(getConfig)
+	if err != nil {
+		return false
+	}
+	// Build a dummy index containing one instance and information about
+	// the image's target system from the image's configuration.
+	index := manifest.OCI1IndexFromComponents([]imgspecv1.Descriptor{{
+		MediaType: imgspecv1.MediaTypeImageManifest,
+		Digest:    manifestDigest,
+		Size:      int64(len(manifestBytes)),
+		Platform: &imgspecv1.Platform{
+			OS:           ii.Os,
+			Architecture: ii.Architecture,
+		},
+	}}, nil)
+	// Check that ChooseInstance() would select this image for this system,
+	// from a list of images.
+	instanceDigest, err := index.ChooseInstance(sys)
+	if err != nil {
+		return false
+	}
+	// Double-check that we can read the runnable image's manifest from the
+	// image record.
+	key = manifestBigDataKey(instanceDigest)
+	_, err = store.ImageBigData(img.ID, key)
+	return err == nil
+}
+
+// Resolve the reference's name to an image ID in the store, if there's already
+// one present with the same name or ID, and return the image.
+func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Image, error) {
+	var loadedImage *storage.Image
+	if s.id == "" && s.named != nil {
+		// Look for an image that has the expanded reference name as an explicit Name value.
+		image, err := s.transport.store.Image(s.named.String())
+		if image != nil && err == nil {
+			loadedImage = image
+			s.id = image.ID
+		}
+	}
+	if s.id == "" && s.named != nil {
+		if digested, ok := s.named.(reference.Digested); ok {
+			// Look for an image with the specified digest that has the same name,
+			// though possibly with a different tag or digest, as a Name value, so
+			// that the canonical reference can be implicitly resolved to the image.
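+			// (For example, illustrative: docker.io/library/busybox@sha256:<digest>
+			// can resolve to an image whose Names list only
+			// docker.io/library/busybox:latest, as long as the image records a
+			// matching digest for that repository.)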
+ images, err := s.transport.store.ImagesByDigest(digested.Digest()) + if err == nil && len(images) > 0 { + for _, image := range images { + if imageMatchesRepo(image, s.named) { + if loadedImage == nil || imageMatchesSystemContext(s.transport.store, image, digested.Digest(), sys) { + loadedImage = image + s.id = image.ID + } + } + } + } + } + } + if s.id == "" { + logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) + return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport()) + } + if loadedImage == nil { + img, err := s.transport.store.Image(s.id) + if err != nil { + return nil, errors.Wrapf(err, "error reading image %q", s.id) + } + loadedImage = img + } + if s.named != nil { + if !imageMatchesRepo(loadedImage, s.named) { + logrus.Errorf("no image matching reference %q found", s.StringWithinTransport()) + return nil, ErrNoSuchImage + } + } + // Default to having the image digest that we hand back match the most recently + // added manifest... + if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok { + loadedImage.Digest = digest + } + // ... unless the named reference says otherwise, and it matches one of the digests + // in the image. For those cases, set the Digest field to that value, for the + // sake of older consumers that don't know there's a whole list in there now. + if s.named != nil { + if digested, ok := s.named.(reference.Digested); ok { + for _, digest := range loadedImage.Digests { + if digest == digested.Digest() { + loadedImage.Digest = digest + break + } + } + } + } + return loadedImage, nil +} + +// Return a Transport object that defaults to using the same store that we used +// to build this reference object. +func (s storageReference) Transport() types.ImageTransport { + return &storageTransport{ + store: s.transport.store, + defaultUIDMap: s.transport.defaultUIDMap, + defaultGIDMap: s.transport.defaultGIDMap, + } +} + +// Return a name with a tag or digest, if we have either, else return it bare. +func (s storageReference) DockerReference() reference.Named { + return s.named +} + +// Return a name with a tag, prefixed with the graph root and driver name, to +// disambiguate between images which may be present in multiple stores and +// share only their names. +func (s storageReference) StringWithinTransport() string { + optionsList := "" + options := s.transport.store.GraphOptions() + if len(options) > 0 { + optionsList = ":" + strings.Join(options, ",") + } + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" + if s.named != nil { + res = res + s.named.String() + } + if s.id != "" { + res = res + "@" + s.id + } + return res +} + +func (s storageReference) PolicyConfigurationIdentity() string { + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + if s.named != nil { + res = res + s.named.String() + } + if s.id != "" { + res = res + "@" + s.id + } + return res +} + +// Also accept policy that's tied to the combination of the graph root and +// driver name, to apply to all images stored in the Store, and to just the +// graph root, in case we're using multiple drivers in the same directory for +// some reason. 
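+// For example (illustrative), for a reference like
+// "[overlay@/var/lib/containers/storage]docker.io/library/busybox:latest"
+// the returned namespaces run from most to least specific:
+//
+//	[overlay@/var/lib/containers/storage]docker.io/library/busybox
+//	[overlay@/var/lib/containers/storage]docker.io/library
+//	[overlay@/var/lib/containers/storage]docker.io
+//	[overlay@/var/lib/containers/storage]
+//	[/var/lib/containers/storage]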
+func (s storageReference) PolicyConfigurationNamespaces() []string {
+	storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
+	driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]"
+	namespaces := []string{}
+	if s.named != nil {
+		if s.id != "" {
+			// The reference without the ID is also a valid namespace.
+			namespaces = append(namespaces, storeSpec+s.named.String())
+		}
+		tagged, isTagged := s.named.(reference.Tagged)
+		_, isDigested := s.named.(reference.Digested)
+		if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace.
+			namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag())
+		}
+		components := strings.Split(s.named.Name(), "/")
+		for len(components) > 0 {
+			namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
+			components = components[:len(components)-1]
+		}
+	}
+	namespaces = append(namespaces, storeSpec)
+	namespaces = append(namespaces, driverlessStoreSpec)
+	return namespaces
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	return newImage(ctx, sys, s)
+}
+
+func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	img, err := s.resolveImage(sys)
+	if err != nil {
+		return err
+	}
+	layers, err := s.transport.store.DeleteImage(img.ID, true)
+	if err == nil {
+		logrus.Debugf("deleted image %q", img.ID)
+		for _, layer := range layers {
+			logrus.Debugf("deleted layer %q", layer)
+		}
+	}
+	return err
+}
+
+func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ctx, sys, s)
+}
+
+func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(s)
+}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go
new file mode 100644
index 000000000..62a091da4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go
@@ -0,0 +1,365 @@
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/idtools"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	minimumTruncatedIDLength = 3
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+var (
+	// Transport is an ImageTransport that uses either a default
+	// storage.Store or one that it's explicitly told to use.
+	Transport StoreTransport = &storageTransport{}
+	// ErrInvalidReference is returned when ParseReference() is passed an
+	// empty reference.
+ ErrInvalidReference = errors.New("invalid reference") + // ErrPathNotAbsolute is returned when a graph root is not an absolute + // path name. + ErrPathNotAbsolute = errors.New("path name is not absolute") +) + +// StoreTransport is an ImageTransport that uses a storage.Store to parse +// references, either its own default or one that it's told to use. +type StoreTransport interface { + types.ImageTransport + // SetStore sets the default store for this transport. + SetStore(storage.Store) + // GetImage retrieves the image from the transport's store that's named + // by the reference. + GetImage(types.ImageReference) (*storage.Image, error) + // GetStoreImage retrieves the image from a specified store that's named + // by the reference. + GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error) + // ParseStoreReference parses a reference, overriding any store + // specification that it may contain. + ParseStoreReference(store storage.Store, reference string) (*storageReference, error) + // SetDefaultUIDMap sets the default UID map to use when opening stores. + SetDefaultUIDMap(idmap []idtools.IDMap) + // SetDefaultGIDMap sets the default GID map to use when opening stores. + SetDefaultGIDMap(idmap []idtools.IDMap) + // DefaultUIDMap returns the default UID map used when opening stores. + DefaultUIDMap() []idtools.IDMap + // DefaultGIDMap returns the default GID map used when opening stores. + DefaultGIDMap() []idtools.IDMap +} + +type storageTransport struct { + store storage.Store + defaultUIDMap []idtools.IDMap + defaultGIDMap []idtools.IDMap +} + +func (s *storageTransport) Name() string { + // Still haven't really settled on a name. + return "containers-storage" +} + +// SetStore sets the Store object which the Transport will use for parsing +// references when information about a Store is not directly specified as part +// of the reference. If one is not set, the library will attempt to initialize +// one with default settings when a reference needs to be parsed. Calling +// SetStore does not affect previously parsed references. +func (s *storageTransport) SetStore(store storage.Store) { + s.store = store +} + +// SetDefaultUIDMap sets the default UID map to use when opening stores. +func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { + s.defaultUIDMap = idmap +} + +// SetDefaultGIDMap sets the default GID map to use when opening stores. +func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) { + s.defaultGIDMap = idmap +} + +// DefaultUIDMap returns the default UID map used when opening stores. +func (s *storageTransport) DefaultUIDMap() []idtools.IDMap { + return s.defaultUIDMap +} + +// DefaultGIDMap returns the default GID map used when opening stores. +func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { + return s.defaultGIDMap +} + +// ParseStoreReference takes a name or an ID, tries to figure out which it is +// relative to the given store, and returns it in a reference object. +func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { + if ref == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref) + } + if ref[0] == '[' { + // Ignore the store specifier. + closeIndex := strings.IndexRune(ref, ']') + if closeIndex < 1 { + return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) + } + ref = ref[closeIndex+1:] + } + + // The reference may end with an image ID. 
Image IDs and digests use the same "@" separator;
+	// here we only peel away an image ID, and leave digests alone.
+	split := strings.LastIndex(ref, "@")
+	id := ""
+	if split != -1 {
+		possibleID := ref[split+1:]
+		if possibleID == "" {
+			return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref)
+		}
+		// If it looks like a digest, leave it alone for now.
+		if _, err := digest.Parse(possibleID); err != nil {
+			// Otherwise…
+			if idSum, err := digest.Parse("sha256:" + possibleID); err == nil && idSum.Validate() == nil {
+				id = possibleID // … it is a full ID
+			} else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) {
+				// … it is a truncated version of the ID of an image that's present in local storage,
+				// so we might as well use the expanded value.
+				id = img.ID
+			} else {
+				return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID)
+			}
+			// We have recognized an image ID; peel it off.
+			ref = ref[:split]
+		}
+	}
+
+	// If we only have one @-delimited portion, then _maybe_ it's a truncated image ID.  Only check on that if it's
+	// at least of what we guess is a reasonable minimum length, because we don't want a really short value
+	// like "a" matching an image by ID prefix when the input was actually meant to specify an image name.
+	if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") {
+		if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) {
+			// It's a truncated version of the ID of an image that's present in local storage;
+			// we need to expand it.
+			id = img.ID
+			ref = ""
+		}
+	}
+
+	var named reference.Named
+	// Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been
+	// completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest.
+	if ref != "" {
+		var err error
+		named, err = reference.ParseNormalizedNamed(ref)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error parsing named reference %q", ref)
+		}
+		named = reference.TagNameOnly(named)
+	}
+
+	result, err := newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id)
+	if err != nil {
+		return nil, err
+	}
+	logrus.Debugf("parsed reference into %q", result.StringWithinTransport())
+	return result, nil
+}
+
+func (s *storageTransport) GetStore() (storage.Store, error) {
+	// Return the transport's previously-set store.  If we don't have one
+	// of those, initialize one now.
+	if s.store == nil {
+		options, err := storage.DefaultStoreOptionsAutoDetectUID()
+		if err != nil {
+			return nil, err
+		}
+		options.UIDMap = s.defaultUIDMap
+		options.GIDMap = s.defaultGIDMap
+		store, err := storage.GetStore(options)
+		if err != nil {
+			return nil, err
+		}
+		s.store = store
+	}
+	return s.store, nil
+}
+
+// ParseReference takes a name and a tag or digest and/or ID
+// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"),
+// possibly prefixed with a store specifier in the form "[_graphroot_]" or
+// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
+// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
+// tries to figure out which it is, and returns it in a reference object.
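+// A few illustrative examples (any "containers-storage:" transport prefix has
+// already been stripped by the caller before this method sees the string):
+//
+//	busybox:latest
+//	[overlay@/var/lib/containers/storage+/run/containers/storage]busybox:latest
+//	[overlay@/var/lib/containers/storage]@<64-hex-character-image-ID>
+//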
+// If _id_ is the ID of an image that's present in local storage, it can be truncated, and
+// even be specified as if it were a _name_ value.
+func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
+	var store storage.Store
+	// Check if there's a store location prefix.  If there is, then it
+	// needs to match a store that was previously initialized using
+	// storage.GetStore(), or be enough to let the storage library fill out
+	// the rest using knowledge that it has from elsewhere.
+	if reference[0] == '[' {
+		closeIndex := strings.IndexRune(reference, ']')
+		if closeIndex < 1 {
+			return nil, ErrInvalidReference
+		}
+		storeSpec := reference[1:closeIndex]
+		reference = reference[closeIndex+1:]
+		// Peel off a "driver@" from the start.
+		driverInfo := ""
+		driverSplit := strings.SplitN(storeSpec, "@", 2)
+		if len(driverSplit) != 2 {
+			if storeSpec == "" {
+				return nil, ErrInvalidReference
+			}
+		} else {
+			driverInfo = driverSplit[0]
+			if driverInfo == "" {
+				return nil, ErrInvalidReference
+			}
+			storeSpec = driverSplit[1]
+			if storeSpec == "" {
+				return nil, ErrInvalidReference
+			}
+		}
+		// Peel off a ":options" from the end.
+		var options []string
+		optionsSplit := strings.SplitN(storeSpec, ":", 2)
+		if len(optionsSplit) == 2 {
+			options = strings.Split(optionsSplit[1], ",")
+			storeSpec = optionsSplit[0]
+		}
+		// Peel off a "+runroot" from the new end.
+		runRootInfo := ""
+		runRootSplit := strings.SplitN(storeSpec, "+", 2)
+		if len(runRootSplit) == 2 {
+			runRootInfo = runRootSplit[1]
+			storeSpec = runRootSplit[0]
+		}
+		// The rest is our graph root.
+		rootInfo := storeSpec
+		// Check that any paths are absolute paths.
+		if rootInfo != "" && !filepath.IsAbs(rootInfo) {
+			return nil, ErrPathNotAbsolute
+		}
+		if runRootInfo != "" && !filepath.IsAbs(runRootInfo) {
+			return nil, ErrPathNotAbsolute
+		}
+		store2, err := storage.GetStore(storage.StoreOptions{
+			GraphDriverName:    driverInfo,
+			GraphRoot:          rootInfo,
+			RunRoot:            runRootInfo,
+			GraphDriverOptions: options,
+			UIDMap:             s.defaultUIDMap,
+			GIDMap:             s.defaultGIDMap,
+		})
+		if err != nil {
+			return nil, err
+		}
+		store = store2
+	} else {
+		// We didn't have a store spec, so use the default.
+		store2, err := s.GetStore()
+		if err != nil {
+			return nil, err
+		}
+		store = store2
+	}
+	return s.ParseStoreReference(store, reference)
+}
+
+func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
+	dref := ref.DockerReference()
+	if dref != nil {
+		if img, err := store.Image(dref.String()); err == nil {
+			return img, nil
+		}
+	}
+	if sref, ok := ref.(*storageReference); ok {
+		tmpRef := *sref
+		if img, err := tmpRef.resolveImage(&types.SystemContext{}); err == nil {
+			return img, nil
+		}
+	}
+	return nil, storage.ErrImageUnknown
+}
+
+func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
+	store, err := s.GetStore()
+	if err != nil {
+		return nil, err
+	}
+	return s.GetStoreImage(store, ref)
+}
+
+func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// Check that there's a store location prefix.  Values we're passed are
+	// expected to come from PolicyConfigurationIdentity or
+	// PolicyConfigurationNamespaces, so if there's no store location,
+	// something's wrong.
+	if scope[0] != '[' {
+		return ErrInvalidReference
+	}
+	// Parse the store location prefix.
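+	// (Illustrative well-formed prefixes: "[/var/lib/containers/storage]" with
+	// just a graph root, or "[overlay@/var/lib/containers/storage]" with a
+	// driver name as well.)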
+	closeIndex := strings.IndexRune(scope, ']')
+	if closeIndex < 1 {
+		return ErrInvalidReference
+	}
+	storeSpec := scope[1:closeIndex]
+	scope = scope[closeIndex+1:]
+	storeInfo := strings.SplitN(storeSpec, "@", 2)
+	if len(storeInfo) == 1 && storeInfo[0] != "" {
+		// One component: the graph root.
+		if !filepath.IsAbs(storeInfo[0]) {
+			return ErrPathNotAbsolute
+		}
+	} else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
+		// Two components: the driver type and the graph root.
+		if !filepath.IsAbs(storeInfo[1]) {
+			return ErrPathNotAbsolute
+		}
+	} else {
+		// Anything else: scope specified in a form we don't
+		// recognize.
+		return ErrInvalidReference
+	}
+	// That might be all of it, and that's okay.
+	if scope == "" {
+		return nil
+	}
+
+	fields := strings.SplitN(scope, "@", 3)
+	switch len(fields) {
+	case 1: // name only
+	case 2: // name:tag@ID or name[:tag]@digest
+		if _, idErr := digest.Parse("sha256:" + fields[1]); idErr != nil {
+			if _, digestErr := digest.Parse(fields[1]); digestErr != nil {
+				return fmt.Errorf("%v is neither a valid digest (%s) nor a valid ID (%s)", fields[1], digestErr.Error(), idErr.Error())
+			}
+		}
+	case 3: // name[:tag]@digest@ID
+		if _, err := digest.Parse(fields[1]); err != nil {
+			return err
+		}
+		if _, err := digest.Parse("sha256:" + fields[2]); err != nil {
+			return err
+		}
+	default: // Coverage: This should never happen
+		return errors.New("Internal error: unexpected number of fields from strings.SplitN")
+	}
+	// As for fields[0], if it is non-empty at all:
+	// FIXME? We could be verifying the various character set and length restrictions
+	// from docker/distribution/reference.regexp.go, but other than that there
+	// are few semantically invalid strings.
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/tarball/doc.go b/vendor/github.com/containers/image/v5/tarball/doc.go
new file mode 100644
index 000000000..ead2d4263
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/tarball/doc.go
@@ -0,0 +1,47 @@
+// Package tarball provides a way to generate images using one or more layer
+// tarballs and an optional template configuration.
+//
+// An example (the copy step needs a signature policy context; the one built
+// here simply accepts anything):
+// package main
+//
+// import (
+//	"context"
+//
+//	cp "github.com/containers/image/v5/copy"
+//	"github.com/containers/image/v5/signature"
+//	"github.com/containers/image/v5/tarball"
+//	"github.com/containers/image/v5/transports/alltransports"
+//
+//	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+// )
+//
+// func imageFromTarball() {
+//	src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
+//	// - or -
+//	// src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
+//	if err != nil {
+//		panic(err)
+//	}
+//	updater, ok := src.(tarball.ConfigUpdater)
+//	if !ok {
+//		panic("unexpected: a tarball reference should implement tarball.ConfigUpdater")
+//	}
+//	config := imgspecv1.Image{
+//		Config: imgspecv1.ImageConfig{
+//			Cmd: []string{"/bin/bash"},
+//		},
+//	}
+//	annotations := make(map[string]string)
+//	annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache"
+//	err = updater.ConfigUpdate(config, annotations)
+//	if err != nil {
+//		panic(err)
+//	}
+//	dest, err := alltransports.ParseImageName("docker-daemon:mock:latest")
+//	if err != nil {
+//		panic(err)
+//	}
+//	policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
+//	policyContext, err := signature.NewPolicyContext(policy)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer policyContext.Destroy()
+//	_, err = cp.Image(context.Background(), policyContext, dest, src, nil)
+//	if err != nil {
+//		panic(err)
+//	}
+// }
+package tarball
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
new file mode 100644
index 000000000..00150c53b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
@@ -0,0 +1,94 @@
+package tarball
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/types"
+
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ConfigUpdater is an interface that ImageReferences for "tarball" images also
+// implement.  It can be used to set values for a configuration, and to set
+// image annotations which will be present in the images returned by the
+// reference's NewImage() or NewImageSource() methods.
+type ConfigUpdater interface {
+	ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error
+}
+
+type tarballReference struct {
+	transport   types.ImageTransport
+	config      imgspecv1.Image
+	annotations map[string]string
+	filenames   []string
+	stdin       []byte
+}
+
+// ConfigUpdate updates the image's default configuration and adds annotations
+// which will be visible in source images created using this reference.
+func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error {
+	r.config = config
+	if r.annotations == nil {
+		r.annotations = make(map[string]string)
+	}
+	for k, v := range annotations {
+		r.annotations[k] = v
+	}
+	return nil
+}
+
+func (r *tarballReference) Transport() types.ImageTransport {
+	return r.transport
+}
+
+func (r *tarballReference) StringWithinTransport() string {
+	return strings.Join(r.filenames, ":")
+}
+
+func (r *tarballReference) DockerReference() reference.Named {
+	return nil
+}
+
+func (r *tarballReference) PolicyConfigurationIdentity() string {
+	return ""
+}
+
+func (r *tarballReference) PolicyConfigurationNamespaces() []string {
+	return nil
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := r.NewImageSource(ctx, sys) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, sys, src) + if err != nil { + src.Close() + return nil, err + } + return img, nil +} + +func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + for _, filename := range r.filenames { + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("error removing %q: %v", filename, err) + } + } + return nil +} + +func (r *tarballReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`) +} diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go new file mode 100644 index 000000000..694ad17bd --- /dev/null +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -0,0 +1,274 @@ +package tarball + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "github.com/containers/image/v5/types" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + imgspecs "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type tarballImageSource struct { + reference tarballReference + filenames []string + diffIDs []digest.Digest + diffSizes []int64 + blobIDs []digest.Digest + blobSizes []int64 + blobTypes []string + config []byte + configID digest.Digest + configSize int64 + manifest []byte +} + +func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + // Gather up the digests, sizes, and date information for all of the files. + filenames := []string{} + diffIDs := []digest.Digest{} + diffSizes := []int64{} + blobIDs := []digest.Digest{} + blobSizes := []int64{} + blobTimes := []time.Time{} + blobTypes := []string{} + for _, filename := range r.filenames { + var file *os.File + var err error + var blobSize int64 + var blobTime time.Time + var reader io.Reader + if filename == "-" { + blobSize = int64(len(r.stdin)) + blobTime = time.Now() + reader = bytes.NewReader(r.stdin) + } else { + file, err = os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) + } + defer file.Close() + reader = file + fileinfo, err := file.Stat() + if err != nil { + return nil, fmt.Errorf("error reading size of %q: %v", filename, err) + } + blobSize = fileinfo.Size() + blobTime = fileinfo.ModTime() + } + + // Default to assuming the layer is compressed. + layerType := imgspecv1.MediaTypeImageLayerGzip + + // Set up to digest the file as it is. + blobIDdigester := digest.Canonical.Digester() + reader = io.TeeReader(reader, blobIDdigester.Hash()) + + // Set up to digest the file after we maybe decompress it. 
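+		// (For a gzipped tarball the blobID is the digest of the compressed
+		// bytes and the diffID is the digest of the uncompressed stream; for an
+		// uncompressed tarball the two end up identical.)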
+ diffIDdigester := digest.Canonical.Digester() + uncompressed, err := pgzip.NewReader(reader) + if err == nil { + // It is compressed, so the diffID is the digest of the uncompressed version + reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) + } else { + // It is not compressed, so the diffID and the blobID are going to be the same + diffIDdigester = blobIDdigester + layerType = imgspecv1.MediaTypeImageLayer + uncompressed = nil + } + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + n, err := io.Copy(ioutil.Discard, reader) + if err != nil { + return nil, fmt.Errorf("error reading %q: %v", filename, err) + } + if uncompressed != nil { + uncompressed.Close() + } + + // Grab our uncompressed and possibly-compressed digests and sizes. + filenames = append(filenames, filename) + diffIDs = append(diffIDs, diffIDdigester.Digest()) + diffSizes = append(diffSizes, n) + blobIDs = append(blobIDs, blobIDdigester.Digest()) + blobSizes = append(blobSizes, blobSize) + blobTimes = append(blobTimes, blobTime) + blobTypes = append(blobTypes, layerType) + } + + // Build the rootfs and history for the configuration blob. + rootfs := imgspecv1.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + created := time.Time{} + history := []imgspecv1.History{} + // Pick up the layer comment from the configuration's history list, if one is set. + comment := "imported from tarball" + if len(r.config.History) > 0 && r.config.History[0].Comment != "" { + comment = r.config.History[0].Comment + } + for i := range diffIDs { + createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) + history = append(history, imgspecv1.History{ + Created: &blobTimes[i], + CreatedBy: createdBy, + Comment: comment, + }) + // Use the mtime of the most recently modified file as the image's creation time. + if created.Before(blobTimes[i]) { + created = blobTimes[i] + } + } + + // Pick up other defaults from the config in the reference. + config := r.config + if config.Created == nil { + config.Created = &created + } + if config.Architecture == "" { + config.Architecture = runtime.GOARCH + } + if config.OS == "" { + config.OS = runtime.GOOS + } + config.RootFS = rootfs + config.History = history + + // Encode and digest the image configuration blob. + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) + } + configID := digest.Canonical.FromBytes(configBytes) + configSize := int64(len(configBytes)) + + // Populate a manifest with the configuration blob and the file as the single layer. + layerDescriptors := []imgspecv1.Descriptor{} + for i := range blobIDs { + layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ + Digest: blobIDs[i], + Size: blobSizes[i], + MediaType: blobTypes[i], + }) + } + annotations := make(map[string]string) + for k, v := range r.annotations { + annotations[k] = v + } + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{ + SchemaVersion: 2, + }, + Config: imgspecv1.Descriptor{ + Digest: configID, + Size: configSize, + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: layerDescriptors, + Annotations: annotations, + } + + // Encode the manifest. + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) + } + + // Return the image. 
+ src := &tarballImageSource{ + reference: *r, + filenames: filenames, + diffIDs: diffIDs, + diffSizes: diffSizes, + blobIDs: blobIDs, + blobSizes: blobSizes, + blobTypes: blobTypes, + config: configBytes, + configID: configID, + configSize: configSize, + manifest: manifestBytes, + } + + return src, nil +} + +func (is *tarballImageSource) Close() error { + return nil +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (is *tarballImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + // We should only be asked about things in the manifest. Maybe the configuration blob. + if blobinfo.Digest == is.configID { + return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil + } + // Maybe one of the layer blobs. + for i := range is.blobIDs { + if blobinfo.Digest == is.blobIDs[i] { + // We want to read that layer: open the file or memory block and hand it back. + if is.filenames[i] == "-" { + return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + } + reader, err := os.Open(is.filenames[i]) + if err != nil { + return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) + } + return reader, is.blobSizes[i], nil + } + } + return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return is.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as there can be no secondary manifests. +func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return nil, nil +} + +func (is *tarballImageSource) Reference() types.ImageReference { + return &is.reference +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. 
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (*tarballImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go new file mode 100644 index 000000000..113545cb7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go @@ -0,0 +1,66 @@ +package tarball + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +const ( + transportName = "tarball" + separator = ":" +) + +var ( + // Transport implements the types.ImageTransport interface for "tarball:" images, + // which are makeshift images constructed using one or more possibly-compressed tar + // archives. + Transport = &tarballTransport{} +) + +type tarballTransport struct { +} + +func (t *tarballTransport) Name() string { + return transportName +} + +func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { + var stdin []byte + var err error + filenames := strings.Split(reference, separator) + for _, filename := range filenames { + if filename == "-" { + stdin, err = ioutil.ReadAll(os.Stdin) + if err != nil { + return nil, fmt.Errorf("error buffering stdin: %v", err) + } + continue + } + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q: %v", filename, err) + } + f.Close() + } + ref := &tarballReference{ + transport: t, + filenames: filenames, + stdin: stdin, + } + return ref, nil +} + +func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + return errors.New(`tarball: does not support any scopes except the default "" one`) +} + +func init() { + transports.Register(Transport) +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go new file mode 100644 index 000000000..2110a091d --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -0,0 +1,46 @@ +package alltransports + +import ( + "strings" + + // register all known transports + // NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating + // a transport. + _ "github.com/containers/image/v5/directory" + _ "github.com/containers/image/v5/docker" + _ "github.com/containers/image/v5/docker/archive" + _ "github.com/containers/image/v5/oci/archive" + _ "github.com/containers/image/v5/oci/layout" + _ "github.com/containers/image/v5/openshift" + _ "github.com/containers/image/v5/tarball" + // The ostree transport is registered by ostree*.go + // The storage transport is registered by storage*.go + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +// ParseImageName converts a URL-like image name to a types.ImageReference. 
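+//
+// A minimal usage sketch (assuming the transports registered above are linked
+// into the build):
+//
+//	ref, err := alltransports.ParseImageName("docker://docker.io/library/alpine:latest")
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(transports.ImageName(ref)) // prints the normalized transport:reference form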
+func ParseImageName(imgName string) (types.ImageReference, error) {
+	// Keep this in sync with TransportFromImageName!
+	parts := strings.SplitN(imgName, ":", 2)
+	if len(parts) != 2 {
+		return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
+	}
+	transport := transports.Get(parts[0])
+	if transport == nil {
+		return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
+	}
+	return transport.ParseReference(parts[1])
+}
+
+// TransportFromImageName converts a URL-like name to a types.ImageTransport, or nil when
+// the transport is unknown or the input is invalid.
+func TransportFromImageName(imageName string) types.ImageTransport {
+	// Keep this in sync with ParseImageName!
+	parts := strings.SplitN(imageName, ":", 2)
+	if len(parts) == 2 {
+		return transports.Get(parts[0])
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go
new file mode 100644
index 000000000..82224052e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go
@@ -0,0 +1,8 @@
+// +build !containers_image_docker_daemon_stub
+
+package alltransports
+
+import (
+	// Register the docker-daemon transport
+	_ "github.com/containers/image/v5/docker/daemon"
+)
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go
new file mode 100644
index 000000000..d13700799
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go
@@ -0,0 +1,9 @@
+// +build containers_image_docker_daemon_stub
+
+package alltransports
+
+import "github.com/containers/image/v5/transports"
+
+func init() {
+	transports.Register(transports.NewStubTransport("docker-daemon"))
+}
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go
new file mode 100644
index 000000000..72432d1ef
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go
@@ -0,0 +1,8 @@
+// +build containers_image_ostree,linux
+
+package alltransports
+
+import (
+	// Register the ostree transport
+	_ "github.com/containers/image/v5/ostree"
+)
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go
new file mode 100644
index 000000000..f4a862bd4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go
@@ -0,0 +1,9 @@
+// +build !containers_image_ostree !linux
+
+package alltransports
+
+import "github.com/containers/image/v5/transports"
+
+func init() {
+	transports.Register(transports.NewStubTransport("ostree"))
+}
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go
new file mode 100644
index 000000000..7041eb876
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go
@@ -0,0 +1,8 @@
+// +build !containers_image_storage_stub
+
+package alltransports
+
+import (
+	// Register the storage transport
+	_ "github.com/containers/image/v5/storage"
+)
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go
new file mode 100644
index 000000000..67f0291cc
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go
@@ -0,0 +1,9 @@
+// +build containers_image_storage_stub
+
+package alltransports
+
+import "github.com/containers/image/v5/transports"
+
+func init() {
+	transports.Register(transports.NewStubTransport("containers-storage"))
+}
diff --git a/vendor/github.com/containers/image/v5/transports/stub.go b/vendor/github.com/containers/image/v5/transports/stub.go
new file mode 100644
index 000000000..2c186a90c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/transports/stub.go
@@ -0,0 +1,36 @@
+package transports
+
+import (
+	"fmt"
+
+	"github.com/containers/image/v5/types"
+)
+
+// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
+type stubTransport string
+
+// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
+func NewStubTransport(name string) types.ImageTransport {
+	return stubTransport(name)
+}
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (s stubTransport) Name() string {
+	return string(s)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s))
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// The scope passed to this function will not be ""; that value is always allowed.
+func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// Allowing any reference in here allows tools with some transports stubbed-out to still
+	// use signature verification policies which refer to these stubbed-out transports.
+	// See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON.
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/transports/transports.go b/vendor/github.com/containers/image/v5/transports/transports.go
new file mode 100644
index 000000000..46ee3710f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/transports/transports.go
@@ -0,0 +1,90 @@
+package transports
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+
+	"github.com/containers/image/v5/types"
+)
+
+// knownTransports is a registry of known ImageTransport instances.
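+//
+// Transports typically register themselves from an init function via the
+// package-level Register, and are looked up by name; a rough sketch, assuming
+// a hypothetical mytransport package exporting a Transport value:
+//
+//	func init() {
+//		transports.Register(mytransport.Transport)
+//	}
+//
+//	// ... elsewhere ...
+//	t := transports.Get("mytransport") // nil if no such transport is registered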
+type knownTransports struct {
+	transports map[string]types.ImageTransport
+	mu         sync.Mutex
+}
+
+func (kt *knownTransports) Get(k string) types.ImageTransport {
+	kt.mu.Lock()
+	t := kt.transports[k]
+	kt.mu.Unlock()
+	return t
+}
+
+func (kt *knownTransports) Remove(k string) {
+	kt.mu.Lock()
+	delete(kt.transports, k)
+	kt.mu.Unlock()
+}
+
+func (kt *knownTransports) Add(t types.ImageTransport) {
+	kt.mu.Lock()
+	defer kt.mu.Unlock()
+	name := t.Name()
+	if t := kt.transports[name]; t != nil {
+		panic(fmt.Sprintf("Duplicate image transport name %s", name))
+	}
+	kt.transports[name] = t
+}
+
+var kt *knownTransports
+
+func init() {
+	kt = &knownTransports{
+		transports: make(map[string]types.ImageTransport),
+	}
+}
+
+// Get returns the transport specified by name or nil when unavailable.
+func Get(name string) types.ImageTransport {
+	return kt.Get(name)
+}
+
+// Delete deletes a transport from the registered transports.
+func Delete(name string) {
+	kt.Remove(name)
+}
+
+// Register registers a transport.
+func Register(t types.ImageTransport) {
+	kt.Add(t)
+}
+
+// ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that
+// ParseImageName(ImageName(reference)) returns an equivalent reference.
+//
+// This is the generally recommended way to refer to images in the UI.
+//
+// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+func ImageName(ref types.ImageReference) string {
+	return ref.Transport().Name() + ":" + ref.StringWithinTransport()
+}
+
+// ListNames returns a list of non-deprecated transport names.
+// Deprecated transports can be used, but are not presented to users.
+func ListNames() []string {
+	kt.mu.Lock()
+	defer kt.mu.Unlock()
+	deprecated := map[string]bool{
+		"atomic": true,
+	}
+	var names []string
+	for _, transport := range kt.transports {
+		if !deprecated[transport.Name()] {
+			names = append(names, transport.Name())
+		}
+	}
+	sort.Strings(names)
+	return names
+}
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
new file mode 100644
index 000000000..2db8c7827
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -0,0 +1,555 @@
+package types
+
+import (
+	"context"
+	"io"
+	"time"
+
+	"github.com/containers/image/v5/docker/reference"
+	compression "github.com/containers/image/v5/pkg/compression/types"
+	digest "github.com/opencontainers/go-digest"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageTransport is a top-level namespace for ways to store/load an image.
+// It should generally correspond to ImageSource/ImageDestination implementations.
+//
+// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
+// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS
+// (or, even, IPv4 or IPv6).
+//
+// OTOH all images using the same transport should, apart from versions of the image format, be interoperable.
+// For example, several different ImageTransport implementations may be based on local filesystem paths,
+// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...)
+//
+// See also the registry of known transports in the transports package.
+type ImageTransport interface {
+	// Name returns the name of the transport, which must be unique among other transports.
+	Name() string
+	// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+	ParseReference(reference string) (ImageReference, error)
+	// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+	// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+	// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+	// The scope passed to this function will not be ""; that value is always allowed.
+	ValidatePolicyConfigurationScope(scope string) error
+}
+
+// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport.
+//
+// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening
+// within an ImageTransport.ParseReference() or equivalent API creating the reference object.
+// That's also why the various identification/formatting methods of this type do not support returning errors.
+//
+// WARNING: While this design freezes the content of the reference within this process, it cannot freeze the outside
+// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on.
+type ImageReference interface {
+	Transport() ImageTransport
+	// StringWithinTransport returns a string representation of the reference, which MUST be such that
+	// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+	// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+	// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+	// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+	// instead, see transports.ImageName().
+	StringWithinTransport() string
+
+	// DockerReference returns a Docker reference associated with this reference
+	// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+	// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+	DockerReference() reference.Named
+
+	// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+	// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+	// the value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+	// (i.e. various references with exactly the same semantics should return the same configuration identity).
+	// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+	// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+	// Returns "" if configuration identities for these references are not supported.
+	PolicyConfigurationIdentity() string
+
+	// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+	// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+	// in order, terminating on first match, and an implicit "" is always checked at the end.
+	// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+	// and each following element to be a prefix of the element preceding it.
+	PolicyConfigurationNamespaces() []string
+
+	// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+	// The caller must call .Close() on the returned ImageCloser.
+	// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+	// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+	// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+	NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error)
+	// NewImageSource returns a types.ImageSource for this reference.
+	// The caller must call .Close() on the returned ImageSource.
+	NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error)
+	// NewImageDestination returns a types.ImageDestination for this reference.
+	// The caller must call .Close() on the returned ImageDestination.
+	NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error)
+
+	// DeleteImage deletes the named image from the registry, if supported.
+	DeleteImage(ctx context.Context, sys *SystemContext) error
+}
+
+// LayerCompression indicates if layers must be compressed, decompressed or preserved
+type LayerCompression int
+
+const (
+	// PreserveOriginal indicates the layer must be preserved, i.e.
+	// no compression or decompression.
+	PreserveOriginal LayerCompression = iota
+	// Decompress indicates the layer must be decompressed
+	Decompress
+	// Compress indicates the layer must be compressed
+	Compress
+)
+
+// BlobInfo collects known information about a blob (layer/config).
+// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
+type BlobInfo struct {
+	Digest      digest.Digest // "" if unknown.
+	Size        int64         // -1 if unknown
+	URLs        []string
+	Annotations map[string]string
+	MediaType   string
+	// CompressionOperation is used in Image.UpdateLayerInfos to instruct
+	// whether the original layer should be preserved or (de)compressed. The
+	// field defaults to preserve the original layer.
+	CompressionOperation LayerCompression
+	// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
+	// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
+	// set when `CompressionOperation == Compress`.
+	CompressionAlgorithm *compression.Algorithm
+}
+
+// BICTransportScope encapsulates a transport-dependent representation of a “scope” where blobs are or are not present.
+// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
+// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICTransportScope struct {
+	Opaque string
+}
+
+// BICLocationReference encapsulates a transport-dependent representation of a blob location within a BICTransportScope.
+// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation,
+// and ImageDestination.TryReusingBlob can look it up using BlobInfoCache.CandidateLocations.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICLocationReference struct {
+	Opaque string
+}
+
+// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
+type BICReplacementCandidate struct {
+	Digest   digest.Digest
+	Location BICLocationReference
+}
+
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
+//
+// It records two kinds of data:
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+//   One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+//   This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+//   or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+//   It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+//   to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+//   This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+//   compress/decompress blobs for their own purposes.
+//
+// - Known blob locations, managed by individual transports:
+//   The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+//   recording transport-specific information that allows the transport to reuse the blob in the future;
+//   then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+//   Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
+//   can be directly reused within a registry, or mounted across registries within a registry server).
+//
+// None of the methods return an error indication: errors in reading from, or writing to, the cache should not be fatal;
+// users of the cache should just fall back to copying the blobs the usual way.
+type BlobInfoCache interface {
+	// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+	// May return anyDigest if it is known to be uncompressed.
+	// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+	UncompressedDigest(anyDigest digest.Digest) digest.Digest
+	// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is the digest given as uncompressed.
+	// It’s allowed for anyDigest == uncompressed.
+	// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+	// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+	// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+	RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
+
+	// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+	// and can be reused given the opaque location data.
+	RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
+	// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+	// within the specified (transport, scope) pair (if they still exist, which is not guaranteed).
+	//
+	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+	// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+	// uncompressed digest.
+	CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
+}
+
+// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
+// This is primarily useful for copying images around; for examining their properties, Image (below)
+// is usually more useful.
+// Each ImageSource should eventually be closed by calling Close().
+//
+// WARNING: Various methods which return an object identified by digest generally do not
+// validate that the returned data actually matches that digest; this is the caller’s responsibility.
+type ImageSource interface {
+	// Reference returns the reference used to set up this source, _as specified by the user_
+	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+	Reference() ImageReference
+	// Close removes resources associated with an initialized ImageSource, if any.
+	Close() error
+	// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+	// It may use a remote (= slow) service.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+	// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+	GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
+	// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+	// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+	GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
+	// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+	HasThreadSafeGetBlob() bool
+	// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+	// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+	// (e.g. if the source never returns manifest lists).
+	GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error)
+	// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+	// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+	// to read the image's layers.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+	// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+	// (e.g. if the source never returns manifest lists).
+	// The Digest field is guaranteed to be provided; Size may be -1.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]BlobInfo, error)
+}
+
+// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
+//
+// There is a specific required order for some of the calls:
+// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
+// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
+// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
+//
+// Each ImageDestination should eventually be closed by calling Close().
+type ImageDestination interface {
+	// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
+	// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+	Reference() ImageReference
+	// Close removes resources associated with an initialized ImageDestination, if any.
+	Close() error
+
+	// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+	// If an empty slice or nil is returned, then any MIME type can be tried when uploading.
+	SupportedManifestMIMETypes() []string
+	// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+	// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+	SupportsSignatures(ctx context.Context) error
+	// DesiredLayerCompression indicates the kind of compression to apply on layers
+	DesiredLayerCompression() LayerCompression
+	// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should be actually
+	// uploaded to the image destination, true otherwise.
+	AcceptsForeignLayerURLs() bool
+	// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+	MustMatchRuntimeOS() bool
+	// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+	// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+	// Does not make a difference if Reference().DockerReference() is nil.
+	IgnoresEmbeddedDockerReference() bool
+
+	// PutBlob writes contents of stream and returns data representing the result.
+	// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+	// inputInfo.Size is the expected length of stream, if known.
+	// inputInfo.MediaType describes the blob format, if known.
+	// May update cache.
+	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+	// to any other readers for download using the supplied digest.
+	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+	PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+	// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+	HasThreadSafePutBlob() bool
+	// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+	// info.Digest must not be empty.
+	// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+	// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+	// May use and/or update cache.
+	TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
+	// PutManifest writes manifest to the destination.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for
+	// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+	// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+	// by `manifest.Digest()`.
+	// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+	// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema),
+	// yet may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+	PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error
+	// PutSignatures writes a set of signatures to the destination.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+	// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+	// MUST be called after PutManifest (signatures may reference manifest contents).
+	PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
+	// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+	// WARNING: This does not have any transactional semantics:
+	// - Uploaded data MAY be visible to others before Commit() is called
+	// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+	Commit(ctx context.Context, unparsedToplevel UnparsedImage) error
+}
+
+// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
+// but refuses specifically this manifest type, though it may accept a different manifest type.
+type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+	Err error
+}
+
+func (e ManifestTypeRejectedError) Error() string {
+	return e.Err.Error()
+}
+
+// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
+// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
+// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
+// This also makes the UnparsedImage→Image conversion an explicitly visible step.
+//
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+type UnparsedImage interface {
+	// Reference returns the reference used to set up this source, _as specified by the user_
+	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+	Reference() ImageReference
+	// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+	Manifest(ctx context.Context) ([]byte, string, error)
+	// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+	Signatures(ctx context.Context) ([][]byte, error)
+}
+
+// Image is the primary API for inspecting properties of images.
+// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+type Image interface {
+	// Note that Reference may return nil in the return value of UpdatedImage!
+	UnparsedImage
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+	ConfigInfo() BlobInfo
+	// ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise.
+	// The result is cached; it is OK to call this however often you need.
+	ConfigBlob(context.Context) ([]byte, error)
+	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+	// old image manifests work (docker v2s1 especially).
+	OCIConfig(context.Context) (*v1.Image, error)
+	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layers).
+	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() []BlobInfo
+	// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
+	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfosForCopy(context.Context) ([]BlobInfo, error)
+	// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with the destination ref.
+	// It returns false if the manifest does not embed a Docker reference.
+	// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+	EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+	// Inspect returns various information (as needed e.g. by skopeo inspect) parsed from the manifest and configuration.
+	Inspect(context.Context) (*ImageInspectInfo, error)
+	// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+	// This is a horribly specific interface, but InformationOnly.LayerDiffIDs can be very expensive to compute
+	// (most importantly it forces us to download the full layers even if they are already present at the destination).
+	UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool
+	// UpdatedImage returns a types.Image modified according to options.
+	// Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
+	// This does not change the state of the original Image object.
+	UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
+	// Size returns an approximation of the amount of disk space which is consumed by the image in its current
+	// location. If the size is not known, -1 will be returned.
+	Size() (int64, error)
+}
+
+// ImageCloser is an Image with a Close() method which must be called by the user.
+// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource,
+// to ensure that the ImageSource is closed.
+type ImageCloser interface {
+	Image
+	// Close removes resources associated with an initialized ImageCloser.
+	Close() error
+}
+
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
+type ManifestUpdateOptions struct {
+	LayerInfos              []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layers). BlobInfos' MediaType fields are ignored.
+	EmbeddedDockerReference reference.Named
+	ManifestMIMEType        string
+	// The values below are NOT requests to modify the image; they provide optional context which may or may not be used.
+	InformationOnly ManifestUpdateInformation
+}
+
+// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
+// only to make writing struct literals possible.
+type ManifestUpdateInformation struct {
+	Destination  ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
+	LayerInfos   []BlobInfo       // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layers)
+	LayerDiffIDs []digest.Digest  // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
+}
+
+// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
+// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported
+// for other manifest types.
+type ImageInspectInfo struct {
+	Tag           string
+	Created       *time.Time
+	DockerVersion string
+	Labels        map[string]string
+	Architecture  string
+	Os            string
+	Layers        []string
+	Env           []string
+}
+
+// DockerAuthConfig contains authorization information for connecting to a registry.
+// The values of Username and Password can be empty for accessing the registry anonymously.
+type DockerAuthConfig struct {
+	Username string
+	Password string
+}
+
+// OptionalBool is a boolean with an additional undefined value, which is meant
+// to be used in the context of user input to distinguish between a
+// user-specified value and a default value.
+type OptionalBool byte
+
+const (
+	// OptionalBoolUndefined indicates that the OptionalBool hasn't been written.
+	OptionalBoolUndefined OptionalBool = iota
+	// OptionalBoolTrue represents the boolean true.
+	OptionalBoolTrue
+	// OptionalBoolFalse represents the boolean false.
+	OptionalBoolFalse
+)
+
+// NewOptionalBool converts the input bool into either OptionalBoolTrue or
+// OptionalBoolFalse. The function is meant to avoid boilerplate code for users.
+func NewOptionalBool(b bool) OptionalBool {
+	o := OptionalBoolFalse
+	if b {
+		o = OptionalBoolTrue
+	}
+	return o
+}
+
+// SystemContext allows parameterizing access to implicitly-accessed resources,
+// like configuration files in /etc and users' login state in their home directory.
+// Various components can share the same field only if their semantics are exactly
+// the same; if in doubt, add a new field.
+// It is always OK to pass nil instead of a SystemContext.
+type SystemContext struct {
+	// If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
+	// Not used for any of the more specific path overrides available in this struct.
+	// Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
+	// NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement,
+	// just set RootForImplicitAbsolutePaths, and there is no need to worry about the environment).
+	// NOTE: This does NOT affect paths starting with $HOME.
+	RootForImplicitAbsolutePaths string
+
+	// === Global configuration overrides ===
+	// If not "", overrides the system's default path for signature.Policy configuration.
+	SignaturePolicyPath string
+	// If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
+	RegistriesDirPath string
+	// Path to the system-wide registries configuration file
+	SystemRegistriesConfPath string
+	// If not "", overrides the default path for the authentication file; only files in the new format are supported
+	AuthFilePath string
+	// If not "", overrides the default path for the authentication file, but with the legacy format;
+	// the code currently will by default look for legacy format files like .dockercfg in the $HOME dir,
+	// but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
+	// in locations other than the home dir; openshift components should then set this field in those cases.
+	// This field is ignored if `AuthFilePath` is set (we favor the newer format);
+	// only reading of this data is supported.
+	LegacyFormatAuthFilePath string
+	// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
+	ArchitectureChoice string
+	// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
+	OSChoice string
+	// If not "", overrides the system's default directory containing a blob info cache.
+	BlobInfoCacheDir string
+
+	// Additional tags when creating or copying a docker-archive.
+	DockerArchiveAdditionalTags []reference.NamedTagged
+
+	// === OCI.Transport overrides ===
+	// If not "", a directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+	// (ending with ".key") used when downloading OCI image layers.
+	OCICertPath string
+	// Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+	OCIInsecureSkipTLSVerify bool
+	// If not "", use a shared directory for storing blobs rather than within OCI layouts
+	OCISharedBlobDirPath string
+	// Allow uncompressed image layers when writing to an OCI layout instead of compressing them
+	OCIAcceptUncompressedLayers bool
+
+	// === docker.Transport overrides ===
+	// If not "", a directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+	// (ending with ".key") used when talking to a Docker Registry.
+	DockerCertPath string
+	// If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
+	// Ignored if DockerCertPath is non-empty.
+	DockerPerHostCertDirPath string
+	// Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+	DockerInsecureSkipTLSVerify OptionalBool
+	// If nil, the library tries to parse ~/.docker/config.json to retrieve credentials
+	DockerAuthConfig *DockerAuthConfig
+	// If not "", a User-Agent header is added to each request when contacting a registry.
+	DockerRegistryUserAgent string
+	// If true, a V1 ping attempt isn't done, to give users a better error. Default is false.
+	// Note that this field is used mainly to integrate containers/image into projectatomic/docker
+	// in order to not break any existing docker integration tests.
+	DockerDisableV1Ping bool
+	// Directory to use for OSTree temporary files
+	OSTreeTmpDirPath string
+
+	// === docker/daemon.Transport overrides ===
+	// A directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+	// (ending with ".key") used when talking to a Docker daemon.
+	DockerDaemonCertPath string
+	// The hostname or IP of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed.
+	DockerDaemonHost string
+	// Used to skip TLS verification, off by default. To take effect, DockerDaemonCertPath needs to be specified as well.
+	DockerDaemonInsecureSkipTLSVerify bool
+
+	// === dir.Transport overrides ===
+	// DirForceCompress compresses the image layers if set to true
+	DirForceCompress bool
+
+	// CompressionFormat is the format to use for the compression of the blobs
+	CompressionFormat *compression.Algorithm
+	// CompressionLevel specifies what compression level is used
+	CompressionLevel *int
+}
+
+// ProgressProperties is used to pass information from the copy code to a monitor which
+// can use the real-time information to produce output or react to changes.
+type ProgressProperties struct {
+	Artifact BlobInfo
+	Offset   uint64
+}
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
new file mode 100644
index 000000000..572be2b89
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -0,0 +1,18 @@
+package version
+
+import "fmt"
+
+const (
+	// VersionMajor is for API-incompatible changes
+	VersionMajor = 5
+	// VersionMinor is for new functionality added in a backwards-compatible manner
+	VersionMinor = 0
+	// VersionPatch is for backwards-compatible bug fixes
+	VersionPatch = 0
+
+	// VersionDev indicates a development branch; it is the empty string for releases.
+	VersionDev = ""
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE
index 0c74e15b0..58b19b6d1 100644
--- a/vendor/github.com/docker/docker/NOTICE
+++ b/vendor/github.com/docker/docker/NOTICE
@@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.
 
 This product includes software developed at Docker, Inc. (https://www.docker.com).
 
-This product contains software (https://github.com/kr/pty) developed
+This product contains software (https://github.com/creack/pty) developed
 by Keith Rarick, licensed under the MIT License.
 
 The following is courtesy of our legal counsel:
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index 38ca5329e..cc2451f03 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -3287,7 +3287,7 @@ definitions:


- - "ingress" makes the target port accessible on on every node, + - "ingress" makes the target port accessible on every node, regardless of whether there is a task for the service running on that node or not. - "host" bypasses the routing mesh and publish the port directly on @@ -3305,8 +3305,8 @@ definitions: type: "object" properties: Mode: - description: "The mode of resolution to use for internal load balancing - between tasks." + description: | + The mode of resolution to use for internal load balancing between tasks. type: "string" enum: - "vip" @@ -4873,7 +4873,7 @@ paths: Note that a running container can be _paused_. The `Running` and `Paused` booleans are not mutually exclusive: - When pausing a container (on Linux), the cgroups freezer is used to suspend + When pausing a container (on Linux), the freezer cgroup is used to suspend all processes in the container. Freezing the process requires the process to be running. As a result, paused containers are both `Running` _and_ `Paused`. @@ -5543,8 +5543,6 @@ paths: description: "no error" 304: description: "container already started" - schema: - $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: @@ -5576,8 +5574,6 @@ paths: description: "no error" 304: description: "container already stopped" - schema: - $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: @@ -5768,9 +5764,9 @@ paths: post: summary: "Pause a container" description: | - Use the cgroups freezer to suspend all processes in a container. + Use the freezer cgroup to suspend all processes in a container. - Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. operationId: "ContainerPause" responses: 204: @@ -6493,10 +6489,11 @@ paths: type: "string" - name: "networkmode" in: "query" - description: "Sets the networking mode for the run commands during - build. Supported standard values are: `bridge`, `host`, `none`, and - `container:`. Any other value is taken as a custom network's - name to which this container should connect to." + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. type: "string" - name: "Content-type" in: "header" @@ -9585,17 +9582,19 @@ paths: type: "integer" - name: "registryAuthFrom" in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. type: "string" - description: "If the X-Registry-Auth header is not specified, this - parameter indicates where to find registry authorization credentials. The - valid values are `spec` and `previous-spec`." + enum: ["spec", "previous-spec"] default: "spec" - name: "rollback" in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. 
type: "string" - description: "Set to this parameter to `previous` to cause a - server-side rollback to the previous service spec. The supplied spec will be - ignored in this case." - name: "X-Registry-Auth" in: "header" description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index 654c88106..209f33eb9 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -7,7 +7,7 @@ import ( "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/strslice" "github.com/docker/go-connections/nat" - "github.com/docker/go-units" + units "github.com/docker/go-units" ) // CgroupnsMode represents the cgroup namespace mode of the container diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 1f75403f7..2e24e769c 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -57,7 +57,7 @@ func ToJSON(a Args) (string, error) { // then the encoded format will use an older legacy format where the values are a // list of strings, instead of a set. // -// Deprecated: Use ToJSON +// Deprecated: do not use in any new code; use ToJSON instead func ToParamWithVersion(version string, a Args) (string, error) { if a.Len() == 0 { return "", nil diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 8789ad3b3..53e47084c 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -4,7 +4,7 @@ import ( "encoding/json" "net" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) // ServiceConfig stores daemon registry services configuration. 
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index b13d9c4c7..4cf9a95ff 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -39,6 +39,7 @@ type ImageInspect struct { Author string Config *container.Config Architecture string + Variant string `json:",omitempty"` Os string OsVersion string `json:",omitempty"` Size int64 diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index b63d4d6d4..0649a69cc 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -252,7 +252,8 @@ func (cli *Client) DaemonHost() string { // HTTPClient returns a copy of the HTTP client bound to the server func (cli *Client) HTTPClient() *http.Client { - return &*cli.client + c := *cli.client + return &c } // ParseHostURL parses a url string, validates the string is a host url, and diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index 1e7a63a9c..c099d80e2 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -35,6 +35,7 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis } if options.Filters.Len() > 0 { + //lint:ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) if err != nil { diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index 6e5653895..f347cadf1 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -90,6 +90,7 @@ func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url } if options.Filters.Len() > 0 { + //lint:ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index e9c9a752f..e77084af6 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -87,6 +87,8 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto // Server hijacks the connection, error 'connection closed' expected resp, err := clientconn.Do(req) + + //lint:ignore SA1019 for connecting to old (pre go1.8) daemons if err != httputil.ErrPersistEOF { if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index 4fa8c006b..a5bc4b095 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -24,6 +24,7 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions } } if optionFilters.Len() > 0 { + //lint:ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) if err != nil { return images, err diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go index 7130c1364..8ca7eb612 100644 --- a/vendor/github.com/docker/docker/client/network_list.go +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -13,6 +13,7 @@ 
import ( func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { query := url.Values{} if options.Filters.Len() > 0 { + //lint:ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index 8285cecd6..a51c930e6 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -15,6 +15,7 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P query := url.Values{} if filter.Len() > 0 { + //lint:ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, filter) if err != nil { return plugins, err diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 3078335e2..144c41636 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -50,15 +50,6 @@ func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, b return cli.sendRequest(ctx, "POST", path, query, body, headers) } -// put sends an http request to the docker API using the method PUT. -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { - body, headers, err := encodeBody(obj, headers) - if err != nil { - return serverResponse{}, err - } - return cli.sendRequest(ctx, "PUT", path, query, body, headers) -} - // putRaw sends an http request to the docker API using the method PUT. func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { return cli.sendRequest(ctx, "PUT", path, query, body, headers) @@ -178,7 +169,13 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp // this is localised - for example in French the error would be // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { - err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. 
This error may also indicate that the docker daemon is not running.")
+ // Check whether the client is running with elevated privileges: opening
+ // \\.\PHYSICALDRIVE0 only succeeds for elevated processes.
+ if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil {
+ f.Close()
+ err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.")
+ } else {
+ err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.")
+ }
 }
 return serverResp, errors.Wrap(err, "error during connect")
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
index 620fc6cff..56bfe55b7 100644
--- a/vendor/github.com/docker/docker/client/service_create.go
+++ b/vendor/github.com/docker/docker/client/service_create.go
@@ -9,7 +9,7 @@ import (
 "github.com/docker/distribution/reference"
 "github.com/docker/docker/api/types"
 "github.com/docker/docker/api/types/swarm"
- "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
 "github.com/pkg/errors"
 )
diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go
index 2380d5638..d68fc2b98 100644
--- a/vendor/github.com/docker/docker/client/volume_list.go
+++ b/vendor/github.com/docker/docker/client/volume_list.go
@@ -15,6 +15,7 @@ func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumet
 query := url.Values{}
 if filter.Len() > 0 {
+ //lint:ignore SA1019 for old code
 filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
 if err != nil {
 return volumes, err
diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go
index ac9bf6d33..07552f1cc 100644
--- a/vendor/github.com/docker/docker/errdefs/http_helpers.go
+++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go
@@ -4,6 +4,7 @@ import (
 "fmt"
 "net/http"
+ containerderrors "github.com/containerd/containerd/errdefs"
 "github.com/docker/distribution/registry/api/errcode"
 "github.com/sirupsen/logrus"
 "google.golang.org/grpc/codes"
@@ -47,6 +48,10 @@ func GetHTTPErrorStatusCode(err error) int {
 if statusCode != http.StatusInternalServerError {
 return statusCode
 }
+ statusCode = statusCodeFromContainerdError(err)
+ if statusCode != http.StatusInternalServerError {
+ return statusCode
+ }
 statusCode = statusCodeFromDistributionError(err)
 if statusCode != http.StatusInternalServerError {
 return statusCode
@@ -136,9 +141,6 @@ func statusCodeFromGRPCError(err error) int {
 case codes.Unavailable: // code 14
 return http.StatusServiceUnavailable
 default:
- if e, ok := err.(causer); ok {
- return statusCodeFromGRPCError(e.Cause())
- }
 // codes.Canceled(1)
 // codes.Unknown(2)
 // codes.DeadlineExceeded(4)
@@ -163,10 +165,27 @@ func statusCodeFromDistributionError(err error) int {
 }
 case errcode.ErrorCoder:
 return errs.ErrorCode().Descriptor().HTTPStatusCode
- default:
- if e, ok := err.(causer); ok {
- return statusCodeFromDistributionError(e.Cause())
- }
 }
 return http.StatusInternalServerError
 }
+
+// statusCodeFromContainerdError returns status code for containerd errors when
+// consumed directly (not through gRPC)
+func statusCodeFromContainerdError(err error) int {
+ switch {
+ case containerderrors.IsInvalidArgument(err):
+ return http.StatusBadRequest
+ case containerderrors.IsNotFound(err):
+ return http.StatusNotFound
+ case containerderrors.IsAlreadyExists(err):
+ return http.StatusConflict
+ case containerderrors.IsFailedPrecondition(err):
+ return http.StatusPreconditionFailed
+ case containerderrors.IsUnavailable(err):
+ return http.StatusServiceUnavailable
+ case containerderrors.IsNotImplemented(err):
+ return http.StatusNotImplemented
+ default:
+ return http.StatusInternalServerError
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 000000000..7307d9694
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 000000000..cbcf86532
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,1294 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+var unpigzPath string
+
+func init() {
+ if path, err := exec.LookPath("unpigz"); err != nil {
+ logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library")
+ } else {
+ logrus.Debugf("Using unpigz binary found at path %s", path)
+ unpigzPath = path
+ }
+}
+
+type (
+ // Compression identifies the compression algorithm used on a stream, if any.
+ Compression int
+ // WhiteoutFormat is the format of whiteout files to be packed or unpacked
+ WhiteoutFormat int
+
+ // TarOptions wraps the tar options.
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ ChownOpts *idtools.Identity
+ IncludeSourceDir bool
+ // WhiteoutFormat is the expected on disk format for whiteout files.
+ // This format will be converted to the standard format on pack
+ // and from the standard format on unpack.
+ WhiteoutFormat WhiteoutFormat
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
+ InUserNS bool
+ }
+)
+
+// Archiver allows the reuse of most utility functions of this package with a
+// pluggable Untar function. Also, to facilitate the passing of specific id
+// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
+type Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ IDMapping *idtools.IdentityMapping
+}
+
+// NewDefaultArchiver returns a new Archiver without any IdentityMapping
+func NewDefaultArchiver() *Archiver {
+ return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}}
+}
+
+// breakoutError is used to differentiate errors related to breaking out.
+// When testing archive breakout in the unit tests, this error is expected
+// in order for the test to pass.
+type breakoutError error
+
+const (
+ // Uncompressed represents uncompressed data.
+ Uncompressed Compression = iota
+ // Bzip2 is bzip2 compression algorithm.
+ Bzip2
+ // Gzip is gzip compression algorithm.
+ Gzip
+ // Xz is xz compression algorithm.
+ Xz
+)
+
+const (
+ // AUFSWhiteoutFormat is the default format for whiteouts
+ AUFSWhiteoutFormat WhiteoutFormat = iota
+ // OverlayWhiteoutFormat formats whiteout according to the overlay
+ // standard.
+ OverlayWhiteoutFormat
+)
+
+const (
+ modeISDIR = 040000 // Directory
+ modeISFIFO = 010000 // FIFO
+ modeISREG = 0100000 // Regular file
+ modeISLNK = 0120000 // Symbolic link
+ modeISBLK = 060000 // Block special file
+ modeISCHR = 020000 // Character special file
+ modeISSOCK = 0140000 // Socket
+)
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+ file, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+ rdr, err := DecompressStream(file)
+ if err != nil {
+ return false
+ }
+ defer rdr.Close()
+ r := tar.NewReader(rdr)
+ _, err = r.Next()
+ return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+ for compression, m := range map[Compression][]byte{
+ Bzip2: {0x42, 0x5A, 0x68},
+ Gzip: {0x1F, 0x8B, 0x08},
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ } {
+ if len(source) < len(m) {
+ logrus.Debug("Len too short")
+ continue
+ }
+ if bytes.Equal(m, source[:len(m)]) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
+
+func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive)
+}
+
+func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
+ if unpigzPath == "" {
+ return gzip.NewReader(buf)
+ }
+
+ disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ")
+ if disablePigzEnv != "" {
+ if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil {
+ return nil, err
+ } else if disablePigz {
+ return gzip.NewReader(buf)
+ }
+ }
+
+ return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
+}
+
+func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(readBuf, func() error {
+ cancel()
+ return readBuf.Close()
+ })
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ p := pools.BufioReader32KPool
+ buf := p.Get(archive)
+ bs, err := buf.Peek(10)
+ if err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some odd
+ // cases where the layer.tar file will be empty (zero bytes) and
+ // that results in an io.EOF from the Peek() call. So, in those
+ // cases we'll just treat it as a non-compressed stream and
+ // that means just create an empty layer.
+ // See Issue 18170
+ return nil, err
+ }
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+ return readBufWrapper, nil
+ case Gzip:
+ ctx, cancel := context.WithCancel(context.Background())
+
+ gzReader, err := gzDecompress(ctx, buf)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+ return wrapReadCloser(readBufWrapper, cancel), nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+ return readBufWrapper, nil
+ case Xz:
+ ctx, cancel := context.WithCancel(context.Background())
+
+ xzReader, err := xzDecompress(ctx, buf)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+ return wrapReadCloser(readBufWrapper, cancel), nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// CompressStream compresses the dest with the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all.
+ // However, this is not a problem as docker only currently generates gzipped tars
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
+// modify the contents or header of an entry in the archive. If the file already
+// exists in the archive the TarModifierFunc will be called with the Header and
+// a reader which will return the file's content. If the file does not exist, both
+// header and content will be nil.
+type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
+
+// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
+// tar stream are modified if they match any of the keys in mods.
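Before the implementation that follows, a usage sketch for ReplaceFileTarWrapper; the input file, entry name, and payload are hypothetical:

package main

import (
	"archive/tar"
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	f, err := os.Open("input.tar") // hypothetical input archive
	if err != nil {
		panic(err)
	}
	mods := map[string]archive.TarModifierFunc{
		// Rewrite (or, if the entry is absent, add) one file in flight.
		"etc/motd": func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
			// header is nil when "etc/motd" was not in the archive; returning
			// a fresh header appends the entry instead of replacing one.
			if header == nil {
				header = &tar.Header{Name: path, Mode: 0644, Typeflag: tar.TypeReg}
			}
			return header, []byte("rewritten\n"), nil
		},
	}
	out := archive.ReplaceFileTarWrapper(f, mods)
	defer out.Close()
	// Drain the rewritten stream; a real caller would write it somewhere useful.
	if _, err := io.Copy(ioutil.Discard, out); err != nil {
		panic(err)
	}
}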
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. 
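Before the FileInfoHeader implementation below, a round-trip sketch for the CompressStream/DecompressStream helpers defined above; the file paths are hypothetical, and note that DecompressStream needs no format argument because it sniffs the magic bytes itself:

package main

import (
	"io"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Wrap a destination file so writes are gzip-compressed.
	dst, err := os.Create("/tmp/data.gz") // hypothetical path
	if err != nil {
		panic(err)
	}
	w, err := archive.CompressStream(dst, archive.Gzip)
	if err != nil {
		panic(err)
	}
	if _, err := io.WriteString(w, "hello"); err != nil {
		panic(err)
	}
	w.Close()
	dst.Close()

	// Reading back: the gzip (or bzip2/xz) layer is detected transparently.
	in, err := os.Open("/tmp/data.gz")
	if err != nil {
		panic(err)
	}
	defer in.Close()
	rc, err := archive.DecompressStream(in)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		panic(err)
	}
}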
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + hdr.Name = canonicalTarName(name, fi.IsDir()) + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return nil +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IdentityMapping *idtools.IdentityMapping + ChownOpts *idtools.Identity + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IdentityMapping: idMapping, + ChownOpts: chownOpts, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. 
+func canonicalTarName(name string, isDir bool) string {
+ name = CanonicalTarNameForPath(name)
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ var link string
+ if fi.Mode()&os.ModeSymlink != 0 {
+ var err error
+ link, err = os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ }
+
+ hdr, err := FileInfoHeader(name, fi, link)
+ if err != nil {
+ return err
+ }
+ if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+ return err
+ }
+
+ // if it's not a directory and has more than 1 link,
+ // it's hard linked, so set the type flag accordingly
+ if !fi.IsDir() && hasHardlinks(fi) {
+ inode, err := getInodeFromStat(fi.Sys())
+ if err != nil {
+ return err
+ }
+ // a link should have a name that it links to,
+ // and that linked name should be first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This Must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ // check whether the file is an overlayfs whiteout;
+ // if yes, skip re-mapping container ID mappings.
+ isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
+
+ // handle re-mapping container ID mappings back to host ID mappings before
+ // writing tar headers/files. We skip whiteout files because they were written
+ // by the kernel and already have proper ownership relative to the host
+ if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
+ fileIDPair, err := getFileUIDGID(fi.Sys())
+ if err != nil {
+ return err
+ }
+ hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair)
+ if err != nil {
+ return err
+ }
+ }
+
+ // explicitly override with ChownOpts
+ if ta.ChownOpts != nil {
+ hdr.Uid = ta.ChownOpts.UID
+ hdr.Gid = ta.ChownOpts.GID
+ }
+
+ if ta.WhiteoutConverter != nil {
+ wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+ if err != nil {
+ return err
+ }
+
+ // If a new whiteout file exists, write original hdr, then
+ // replace hdr with wo to be written after. Whiteouts should
+ // always be written after the original. Note the original
+ // hdr may have been updated to be a whiteout with returning
+ // a whiteout header
+ if wo != nil {
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+ }
+ hdr = wo
+ }
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ // We use system.OpenSequential to ensure we use sequential file
+ // access on Windows to avoid depleting the standby list.
+ // On Linux, this equates to a regular os.Open.
+ file, err := system.OpenSequential(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error {
+ // hdr.Mode is in linux format, which we can use for syscalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg, tar.TypeRegA:
+ // Source is regular file. We use system.OpenFileSequential to use sequential
+ // file access to avoid depleting the standby list on Windows.
+ // On Linux, this equates to a regular os.OpenFile
+ file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(file, reader); err != nil {
+ file.Close()
+ return err
+ }
+ file.Close()
+
+ case tar.TypeBlock, tar.TypeChar:
+ if inUserns { // cannot create devices in a userns
+ return nil
+ }
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeFifo:
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeLink:
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ logrus.Debug("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
+ }
+
+ // Lchown is not supported on Windows.
+ if Lchown && runtime.GOOS != "windows" {
+ if chownOpts == nil {
+ chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}
+ }
+ if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ return err
+ }
+ }
+
+ var errors []string
+ for key, value := range hdr.Xattrs {
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ if err == syscall.ENOTSUP || err == syscall.EPERM {
+ // We ignore errors here because not all graphdrivers support
+ // xattrs *cough* old versions of AUFS *cough*.
However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + // EPERM occurs if modifying xattrs is not allowed. This can + // happen when running in userns with restrictions (ChromeOS). + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." 
is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(options.IncludeFiles) > 0 {
+ logrus.Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(srcPath)
+ srcPath = dir
+ options.IncludeFiles = []string{base}
+ }
+
+ if len(options.IncludeFiles) == 0 {
+ options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ for _, include := range options.IncludeFiles {
+ rebaseName := options.RebaseNames[include]
+
+ walkRoot := getWalkRoot(srcPath, include)
+ filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ skip, err = pm.Matches(relFilePath)
+ if err != nil {
+ logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+ }
+
+ if skip {
+ // If we want to skip this file and it's a directory
+ // then we should first check to see if there's an
+ // excludes pattern (e.g. !dir/file) that starts with this
+ // dir. If so then we can't skip this dir.
+
+ // It's not a dir then so we can just return/skip.
+ if !f.IsDir() {
+ return nil
+ }
+
+ // No exceptions (!...) in patterns so just skip dir
+ if !pm.Exclusions() {
+ return filepath.SkipDir
+ }
+
+ dirSlash := relFilePath + string(filepath.Separator)
+
+ for _, pat := range pm.Patterns() {
+ if !pat.Exclusion() {
+ continue
+ }
+ if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
+ // found a match - so can't skip this dir
+ return nil
+ }
+ }
+
+ // No matching exclusion dir so just skip dir
+ return filepath.SkipDir
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // Rename the base resource.
+ if rebaseName != "" {
+ var replacement string
+ if rebaseName != string(filepath.Separator) {
+ // Special case the root directory to replace with an
+ // empty string instead so that we don't end up with
+ // double slashes in the paths.
+ replacement = rebaseName
+ }
+
+ relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+ // if pipe is broken, stop writing tar stream to it
+ if err == io.ErrClosedPipe {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ }()
+
+ return pipeReader, nil
+}
+
+// Unpack unpacks the decompressedArchive to dest with options.
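Before Unpack below, a sketch of driving the packing side just defined; the source path and exclude patterns are hypothetical:

package main

import (
	"io"
	"io/ioutil"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Stream a directory as a gzipped tar, skipping editor backups and .git.
	rc, err := archive.TarWithOptions("/tmp/context", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"*~", ".git"},
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	// Drain the stream; a real caller would send it to a daemon or a file.
	if _, err := io.Copy(ioutil.Discard, rc); err != nil {
		panic(err)
	}
}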
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+ trBuf := pools.BufioReader32KPool.Get(nil)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+ rootIDs := idMapping.RootPair()
+ whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+ // This keeps "..\" as-is, but normalizes "\..\" to "\".
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.ExcludePatterns {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+ // the filepath format for the OS on which the daemon is running. Hence
+ // the check for a slash-suffix MUST be done in an OS-agnostic way.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+ // If path exists we almost always just want to remove and replace it
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing directory with a non-directory from the archive.
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+ }
+
+ if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing non-directory with a directory from the archive.
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+ }
+
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+ trBuf.Reset(tr)
+
+ if err := remapIDs(idMapping, hdr); err != nil {
+ return err
+ }
+
+ if whiteoutConverter != nil {
+ writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+ if err != nil {
+ return err
+ }
+ if !writeFile {
+ continue
+ }
+ }
+
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ r := tarArchive
+ if decompress {
+ decompressedArchive, err := DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return Unpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ logrus.Debugf("TarUntar(%s %s)", src, dst)
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMapping.UIDs(),
+ GIDMaps: archiver.IDMapping.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath untars a file from path to a destination, src is the source tar file path.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMapping.UIDs(),
+ GIDMaps: archiver.IDMapping.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
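Before the CopyWithTar implementation below, a consuming-side sketch for Untar above; the archive path and destination are hypothetical, and any gzip/bzip2/xz layer is undone automatically before unpacking:

package main

import (
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	f, err := os.Open("/tmp/layer.tar.gz") // hypothetical archive
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// nil options select the defaults: no exclude patterns, entries chowned.
	if err := archive.Untar(f, "/tmp/rootfs", nil); err != nil {
		panic(err)
	}
}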
+func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this Archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.IDMapping.RootPair() + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + return err + } + + r, w := io.Pipe() + errC := make(chan error, 1) + + go func() { + defer close(errC) + + errC <- func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + if err := remapIDs(archiver.IDMapping, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IdentityMapping returns the IdentityMapping of the archiver. +func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { + return archiver.IDMapping +} + +func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { + ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. 
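Before cmdStream below, a sketch of the Archiver conveniences defined above; the paths are hypothetical, and NewDefaultArchiver yields an identity (no-op) ID mapping:

package main

import "github.com/docker/docker/pkg/archive"

func main() {
	a := archive.NewDefaultArchiver()
	// Recursively copy a directory, preserving metadata, via an in-memory tar pipe.
	if err := a.CopyWithTar("/tmp/src", "/tmp/dst"); err != nil {
		panic(err)
	}
	// Single regular file variant.
	if err := a.CopyFileWithTar("/tmp/src/hostname", "/tmp/dst/hostname"); err != nil {
		panic(err)
	}
}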
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, err + } + + // Ensure the command has exited before we clean anything up + done := make(chan struct{}) + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + close(done) + }() + + return ioutils.NewReadCloserWrapper(pipeR, func() error { + // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as + // cmd.Wait waits for any non-file stdout/stderr/stdin to close. + err := pipeR.Close() + <-done + return err + }), nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. 
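Before the TempArchive methods below, a sketch of NewTempArchive's read-once contract; the source path is hypothetical, and once the final byte is read the backing temp file is closed and removed:

package main

import (
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	src, err := os.Open("/tmp/layer.tar") // hypothetical source
	if err != nil {
		panic(err)
	}
	defer src.Close()
	// Spool src into a temp file under "" (the default temp directory).
	ta, err := archive.NewTempArchive(src, "")
	if err != nil {
		panic(err)
	}
	// Reading to EOF deletes the temp file; a second read would fail.
	if _, err := io.Copy(ioutil.Discard, ta); err != nil {
		panic(err)
	}
}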
+func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go new file mode 100644 index 000000000..0601f7b0d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -0,0 +1,261 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containerd/continuity/fs" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{inUserNS: inUserNS} + } + return nil +} + +type overlayWhiteoutConverter struct { + inUserNS bool +} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + if err != nil { + if c.inUserNS { + if err = replaceDirWithOverlayOpaque(dir); err != nil { + return false, errors.Wrapf(err, "replaceDirWithOverlayOpaque(%q) failed", dir) + } + } else { + return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir) + } + } + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + if c.inUserNS { + // Ubuntu and a few distros support overlayfs in userns. 
+				//
+				// Although we can't call mknod directly in userns (at least on bionic kernel 4.15),
+				// we can still create a 0,0 char device using mknodChar0Overlay().
+				//
+				// NOTE: we don't need this hack for the containerd snapshotter+unpack model.
+				if err := mknodChar0Overlay(originalPath); err != nil {
+					return false, errors.Wrapf(err, "failed to mknodChar0Overlay(%q)", originalPath)
+				}
+			} else {
+				return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath)
+			}
+		}
+		if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+			return false, err
+		}
+
+		// don't write the file itself
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// mknodChar0Overlay creates a 0,0 char device by mounting overlayfs and unlinking.
+// This function can be used for creating a 0,0 char device in userns on Ubuntu.
+//
+// Steps:
+// * Mkdir lower,upper,merged,work
+// * Create lower/dummy
+// * Mount overlayfs
+// * Unlink merged/dummy
+// * Unmount overlayfs
+// * Make sure a 0,0 char device is created as upper/dummy
+// * Rename upper/dummy to cleansedOriginalPath
+func mknodChar0Overlay(cleansedOriginalPath string) error {
+	dir := filepath.Dir(cleansedOriginalPath)
+	tmp, err := ioutil.TempDir(dir, "mc0o")
+	if err != nil {
+		return errors.Wrapf(err, "failed to create a tmp directory under %s", dir)
+	}
+	defer os.RemoveAll(tmp)
+	lower := filepath.Join(tmp, "l")
+	upper := filepath.Join(tmp, "u")
+	work := filepath.Join(tmp, "w")
+	merged := filepath.Join(tmp, "m")
+	for _, s := range []string{lower, upper, work, merged} {
+		if err := os.MkdirAll(s, 0700); err != nil {
+			return errors.Wrapf(err, "failed to mkdir %s", s)
+		}
+	}
+	dummyBase := "d"
+	lowerDummy := filepath.Join(lower, dummyBase)
+	if err := ioutil.WriteFile(lowerDummy, []byte{}, 0600); err != nil {
+		return errors.Wrapf(err, "failed to create a dummy lower file %s", lowerDummy)
+	}
+	mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
+	// docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
+	if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
+		return errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
+	}
+	mergedDummy := filepath.Join(merged, dummyBase)
+	if err := os.Remove(mergedDummy); err != nil {
+		syscall.Unmount(merged, 0)
+		return errors.Wrapf(err, "failed to unlink %s", mergedDummy)
+	}
+	if err := syscall.Unmount(merged, 0); err != nil {
+		return errors.Wrapf(err, "failed to unmount %s", merged)
+	}
+	upperDummy := filepath.Join(upper, dummyBase)
+	if err := isChar0(upperDummy); err != nil {
+		return err
+	}
+	if err := os.Rename(upperDummy, cleansedOriginalPath); err != nil {
+		return errors.Wrapf(err, "failed to rename %s to %s", upperDummy, cleansedOriginalPath)
+	}
+	return nil
+}
+
+func isChar0(path string) error {
+	osStat, err := os.Stat(path)
+	if err != nil {
+		return errors.Wrapf(err, "failed to stat %s", path)
+	}
+	st, ok := osStat.Sys().(*syscall.Stat_t)
+	if !ok {
+		return errors.Errorf("got unsupported stat for %s", path)
+	}
+	if os.FileMode(st.Mode)&syscall.S_IFMT != syscall.S_IFCHR {
+		return errors.Errorf("%s is not a character device, got mode=%d", path, st.Mode)
+	}
+	if st.Rdev != 0 {
+		return errors.Errorf("%s is not a 0,0 character device, got Rdev=%d", path, st.Rdev)
+	}
+	return nil
+}
+
+// replaceDirWithOverlayOpaque replaces path with a new directory with trusted.overlay.opaque
+// xattr. The contents of the directory are preserved.
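+//
+// Where setxattr is permitted, the end state is comparable to the direct call
+// below (shown only for illustration; it is precisely this call that fails
+// with EPERM inside a user namespace, which is why the copy-and-rename dance
+// exists):
+//
+//	unix.Setxattr(path, "trusted.overlay.opaque", []byte{'y'}, 0)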
+func replaceDirWithOverlayOpaque(path string) error { + if path == "/" { + return errors.New("replaceDirWithOverlayOpaque: path must not be \"/\"") + } + dir := filepath.Dir(path) + tmp, err := ioutil.TempDir(dir, "rdwoo") + if err != nil { + return errors.Wrapf(err, "failed to create a tmp directory under %s", dir) + } + defer os.RemoveAll(tmp) + // newPath is a new empty directory crafted with trusted.overlay.opaque xattr. + // we copy the content of path into newPath, remove path, and rename newPath to path. + newPath, err := createDirWithOverlayOpaque(tmp) + if err != nil { + return errors.Wrapf(err, "createDirWithOverlayOpaque(%q) failed", tmp) + } + if err := fs.CopyDir(newPath, path); err != nil { + return errors.Wrapf(err, "CopyDir(%q, %q) failed", newPath, path) + } + if err := os.RemoveAll(path); err != nil { + return err + } + return os.Rename(newPath, path) +} + +// createDirWithOverlayOpaque creates a directory with trusted.overlay.opaque xattr, +// without calling setxattr, so as to allow creating opaque dir in userns on Ubuntu. +func createDirWithOverlayOpaque(tmp string) (string, error) { + lower := filepath.Join(tmp, "l") + upper := filepath.Join(tmp, "u") + work := filepath.Join(tmp, "w") + merged := filepath.Join(tmp, "m") + for _, s := range []string{lower, upper, work, merged} { + if err := os.MkdirAll(s, 0700); err != nil { + return "", errors.Wrapf(err, "failed to mkdir %s", s) + } + } + dummyBase := "d" + lowerDummy := filepath.Join(lower, dummyBase) + if err := os.MkdirAll(lowerDummy, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create a dummy lower directory %s", lowerDummy) + } + mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work) + // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead. 
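+	// The mount below is roughly the programmatic form of:
+	//
+	//	mount -t overlay overlay -o lowerdir=$tmp/l,upperdir=$tmp/u,workdir=$tmp/w $tmp/m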
+ if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil { + return "", errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged) + } + mergedDummy := filepath.Join(merged, dummyBase) + if err := os.Remove(mergedDummy); err != nil { + syscall.Unmount(merged, 0) + return "", errors.Wrapf(err, "failed to rmdir %s", mergedDummy) + } + // upperDummy becomes a 0,0-char device file here + if err := os.Mkdir(mergedDummy, 0700); err != nil { + syscall.Unmount(merged, 0) + return "", errors.Wrapf(err, "failed to mkdir %s", mergedDummy) + } + // upperDummy becomes a directory with trusted.overlay.opaque xattr + // (but can't be verified in userns) + if err := syscall.Unmount(merged, 0); err != nil { + return "", errors.Wrapf(err, "failed to unmount %s", merged) + } + upperDummy := filepath.Join(upper, dummyBase) + return upperDummy, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go new file mode 100644 index 000000000..65a73354c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 000000000..d62633603 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,115 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) string { + return p // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
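+// On unix this is the identity function, e.g. chmodTarEntry(0644) == 0644;
+// contrast with the Windows implementation later in this package, which
+// forces an executable bit and masks group/world write.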
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.Identity, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 000000000..ae6b89fd7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,67 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) string { + return filepath.ToSlash(p) +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) 
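+	// Worked example, in octal: 0644 becomes 0755 (0644|0111 = 0755, which
+	// &0755 leaves unchanged), and 0600 becomes 0711 (0600|0111 = 0711,
+	// 0711&0755 = 0711). Bits outside os.ModePerm pass through untouched
+	// via noPermPart below.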
+ permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.Identity, error) { + // no notion of file ownership mapping yet on Windows + return idtools.Identity{UID: 0, GID: 0}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go new file mode 100644 index 000000000..aedb91b03 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -0,0 +1,445 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar doesn't have sub-second mtime precision. The go tar +// writer (1.10+) does when using PAX format, but we round times to seconds +// to ensure archives have the same hashes for backwards compatibility. +// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4. +// +// Non-sub-second is problematic when we apply changes via tar +// files. 
We handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a.Equal(b) || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. 
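+		// For example, adding /foo/bar/new.txt records
+		// {Kind: ChangeModify, Path: "/foo/bar"} immediately before
+		// {Kind: ChangeAdd, Path: "/foo/bar/new.txt"}, unless /foo/bar was
+		// already seen as changed during this walk.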
+ if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + !bytes.Equal(oldChild.capability, newChild.capability) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. 
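+	// For example, if this call is visiting /a and the recursion above
+	// appended {ChangeModify, "/a/b"}, the slice surgery below yields
+	// [..., {ChangeModify, "/a"}, {ChangeModify, "/a/b"}], keeping each
+	// directory ahead of its children.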
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. 
a diff operation the container can continue
+		// mutating the filesystem and we can see transient errors
+		// from this
+		for _, change := range changes {
+			if change.Kind == ChangeDelete {
+				whiteOutDir := filepath.Dir(change.Path)
+				whiteOutBase := filepath.Base(change.Path)
+				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+				timestamp := time.Now()
+				hdr := &tar.Header{
+					Name:       whiteOut[1:],
+					Size:       0,
+					ModTime:    timestamp,
+					AccessTime: timestamp,
+					ChangeTime: timestamp,
+				}
+				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+					logrus.Debugf("Can't write whiteout header: %s", err)
+				}
+			} else {
+				path := filepath.Join(dir, change.Path)
+				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+					logrus.Debugf("Can't add file %s to tar: %s", path, err)
+				}
+			}
+		}
+
+		// Make sure to check the error on Close.
+		if err := ta.TarWriter.Close(); err != nil {
+			logrus.Debugf("Can't close layer: %s", err)
+		}
+		if err := writer.Close(); err != nil {
+			logrus.Debugf("failed close Changes writer: %s", err)
+		}
+	}()
+	return reader, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 000000000..f8792b3d4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,286 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"syscall"
+	"unsafe"
+
+	"github.com/docker/docker/pkg/system"
+	"golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save up to seconds on large
+// images.
+type walker struct {
+	dir1  string
+	dir2  string
+	root1 *FileInfo
+	root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// for generating a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+	w := &walker{
+		dir1:  dir1,
+		dir2:  dir2,
+		root1: newRootFileInfo(),
+		root2: newRootFileInfo(),
+	}
+
+	i1, err := os.Lstat(w.dir1)
+	if err != nil {
+		return nil, nil, err
+	}
+	i2, err := os.Lstat(w.dir2)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := w.walk("/", i1, i2); err != nil {
+		return nil, nil, err
+	}
+
+	return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
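+	// For example, names1 = [a c d] and names2 = [b c] merge into
+	// names = [a b d] when both "c" entries share inode and device (that
+	// subtree is pruned), or into [a b c d] when they differ.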
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 000000000..ba744741c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go new file mode 100644 index 000000000..06217b716 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -0,0 +1,43 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + "syscall" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.UID() != newStat.UID() || + oldStat.GID() != newStat.GID() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size or modification time for dirs, its not a good + // measure of change. 
See https://github.com/moby/moby/issues/9874 + // for a description of the issue with modification time, and + // https://github.com/moby/moby/pull/11422 for the change. + // (Note that in the Windows implementation of this function, + // modification time IS taken as a change). See + // https://github.com/moby/moby/pull/37982 for more information. + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return fi.Sys().(*syscall.Stat_t).Ino +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go new file mode 100644 index 000000000..9906685e4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go @@ -0,0 +1,34 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Note there is slight difference between the Linux and Windows + // implementations here. Due to https://github.com/moby/moby/issues/9874, + // and the fix at https://github.com/moby/moby/pull/11422, Linux does not + // consider a change to the directory time as a change. Windows on NTFS + // does. See https://github.com/moby/moby/pull/37982 for more information. + + if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 000000000..57fddac07 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,480 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in the separator, then another is not added. 
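+//
+// For example:
+//
+//	PreserveTrailingDotOrSeparator("/foo", "/foo/", '/')  == "/foo/"
+//	PreserveTrailingDotOrSeparator("/foo", "/foo/.", '/') == "/foo/."
+//	PreserveTrailingDotOrSeparator("/foo", "/foo", '/')   == "/foo"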
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { + // Ensure paths are in platform semantics + cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) + originalPath = strings.Replace(originalPath, "/", string(sep), -1) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath, sep) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(sep) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { + cleanedPath += string(sep) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string, sep byte) bool { + return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string, sep byte) bool { + return len(path) > 0 && path[len(path)-1] == sep +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(filepath.FromSlash(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(os.PathSeparator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". +func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. 
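+	// For example, sourcePath "/a/b/c" splits into sourceDir "/a/b" and
+	// sourceBase "c"; the archive is then built relative to "/a/b" with only
+	// "c" (and anything beneath it) included.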
+	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+	opts := TarResourceRebaseOpts(sourceBase, rebaseName)
+
+	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+	return TarWithOptions(sourceDir, opts)
+}
+
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
+// parameters to be sent to TarWithOptions (the TarOptions struct)
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+	filter := []string{sourceBase}
+	return &TarOptions{
+		Compression:      Uncompressed,
+		IncludeFiles:     filter,
+		IncludeSourceDir: true,
+		RebaseNames: map[string]string{
+			sourceBase: rebaseName,
+		},
+	}
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+	Path       string
+	Exists     bool
+	IsDir      bool
+	RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+	// normalize the file path and then evaluate the symlink
+	// we will use the target file instead of the symlink if
+	// followLink is set
+	path = normalizePath(path)
+
+	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	stat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	return CopyInfo{
+		Path:       resolvedPath,
+		Exists:     true,
+		IsDir:      stat.IsDir(),
+		RebaseName: rebaseName,
+	}, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+	path = normalizePath(path)
+	originalPath := path
+
+	stat, err := os.Lstat(path)
+
+	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+		// The path exists and is not a symlink.
+		return CopyInfo{
+			Path:   path,
+			Exists: true,
+			IsDir:  stat.IsDir(),
+		}, nil
+	}
+
+	// While the path is a symlink.
+	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+		if n > maxSymlinkIter {
+			// Don't follow symlinks more than this arbitrary number of times.
+			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+		}
+
+		// The path is a symbolic link. We need to evaluate it so that the
+		// destination of the copy operation is the link target and not the
+		// link itself. This is notably different than CopyInfoSourcePath which
+		// only evaluates symlinks before the last appearing path separator.
+		// Also note that it is okay if the last path element is a broken
+		// symlink as the copy operation should create the target.
+		var linkTarget string
+
+		linkTarget, err = os.Readlink(path)
+		if err != nil {
+			return CopyInfo{}, err
+		}
+
+		if !system.IsAbs(linkTarget) {
+			// Join with the parent directory.
+			dstParent, _ := SplitPathDirEntry(path)
+			linkTarget = filepath.Join(dstParent, linkTarget)
+		}
+
+		path = linkTarget
+		stat, err = os.Lstat(path)
+	}
+
+	if err != nil {
+		// It's okay if the destination path doesn't exist.
We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Stat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path, os.PathSeparator): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. 
In this case, the destination file will need
+		// to be created when the archive is extracted and the source content
+		// entry will have to be renamed to have a basename which matches the
+		// destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	}
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+	if oldBase == string(os.PathSeparator) {
+		// If oldBase specifies the root directory, use an empty string as
+		// oldBase instead so that newBase doesn't replace the path separator
+		// that all paths will start with.
+		oldBase = ""
+	}
+
+	rebased, w := io.Pipe()
+
+	go func() {
+		srcTar := tar.NewReader(srcContent)
+		rebasedTar := tar.NewWriter(w)
+
+		for {
+			hdr, err := srcTar.Next()
+			if err == io.EOF {
+				// Signals end of archive.
+				rebasedTar.Close()
+				w.Close()
+				return
+			}
+			if err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			// srcContent tar stream, as served by TarWithOptions(), is
+			// definitely in PAX format, but tar.Next() mistakenly guesses it
+			// as USTAR, which creates a problem: if the newBase is >100
+			// characters long, WriteHeader() returns an error like
+			// "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
+			//
+			// To fix, set the format to PAX here. See docker/for-linux issue #484.
+			hdr.Format = tar.FormatPAX
+			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+			if hdr.Typeflag == tar.TypeLink {
+				hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+			}
+
+			if err = rebasedTar.WriteHeader(hdr); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+		}
+	}()
+
+	return rebased
+}
+
+// TODO @gupta-ak. These might have to be changed in the future to be
+// continuity driver aware as well to support LCOW.
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+	var (
+		srcInfo CopyInfo
+		err     error
+	)
+
+	// Ensure in platform semantics
+	srcPath = normalizePath(srcPath)
+	dstPath = normalizePath(dstPath)
+
+	// Clean the source and destination paths.
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
+
+	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+		return err
+	}
+
+	content, err := TarResource(srcInfo)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+
+	return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+	// The destination path need not exist, but CopyInfoDestinationPath will
+	// ensure that at least the parent directory exists.
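+	// For example, extracting to "/tmp/newfile" is fine when "/tmp" exists,
+	// while "/no-such-dir/newfile" fails because the parent directory cannot
+	// be stat'ed. (Paths here are illustrative only.)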
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) + if err != nil { + return err + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} + +// ResolveHostSourcePath decides real path need to be copied with parameters such as +// whether to follow symbol link or not, if followLink is true, resolvedPath will return +// link target of any symbol link file, else it will only resolve symlink of directory +// but return symbol link file itself without resolving. +func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path, os.PathSeparator) && + filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && + !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path, os.PathSeparator) && + !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. 
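+		// For example, when the requested path "data/" is a symlink resolving
+		// to "real-data/", entries are archived under "real-data" and must be
+		// rebased back to the requested name "data". (Names here are
+		// illustrative only.)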
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 000000000..3958364f5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 000000000..a878d1bac --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 000000000..27897e6ab --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,260 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
+		if runtime.GOOS == "windows" {
+			if strings.Contains(hdr.Name, ":") {
+				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Note: as these operations are platform-specific, so must the slash be.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists.
+			// This happened in some tests where an image had a tarfile without any
+			// parent directories.
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = system.MkdirAll(parentPath, 0600)
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
+			// We don't want this directory, but we need the files in it so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != WhiteoutOpaqueDir {
+				continue
+			}
+		}
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note: as these operations are platform-specific, so must the slash be.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
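The whiteout handling above is the heart of layer application: specially named entries request deletions instead of carrying data. A simplified classifier under the same naming scheme (classify is a hypothetical helper; the constants are inlined to match the values whiteouts.go defines later in this patch):

    package main

    import (
    	"fmt"
    	"path/filepath"
    	"strings"
    )

    const (
    	whiteoutPrefix    = ".wh."         // same value as WhiteoutPrefix
    	whiteoutOpaqueDir = ".wh..wh..opq" // same value as WhiteoutOpaqueDir
    )

    // classify reports what a layer entry asks the extractor to do,
    // mirroring the base/dir split in UnpackLayer above.
    func classify(name string) string {
    	base := filepath.Base(name)
    	dir := filepath.Dir(name)
    	switch {
    	case base == whiteoutOpaqueDir:
    		return fmt.Sprintf("make %q opaque (clear lower-layer contents)", dir)
    	case strings.HasPrefix(base, whiteoutPrefix):
    		return fmt.Sprintf("delete %q", filepath.Join(dir, base[len(whiteoutPrefix):]))
    	default:
    		return fmt.Sprintf("extract %q", name)
    	}
    }

    func main() {
    	for _, name := range []string{"etc/passwd", "etc/.wh.shadow", "var/.wh..wh..opq"} {
    		fmt.Println(classify(name))
    	}
    }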
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(idMapping, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + if runtime.GOOS != "windows" { + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) + } + + if decompress { + decompLayer, err := DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompLayer.Close() + layer = decompLayer + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go new file mode 100644 index 000000000..495db809e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/pkg/archive" + "github.com/sirupsen/logrus" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 
000000000..797143ee8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = (1 << 30) - 2 + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 000000000..f58bf227f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 000000000..4c072a87e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 000000000..85435694c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) 
+ buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go index 47ecd0c09..5e6310fdc 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -5,24 +5,8 @@ import ( "os" "path/filepath" "strings" - - "github.com/docker/docker/pkg/idtools" ) -// GetStatic returns the home directory for the current user without calling -// os/user.Current(). This is useful for static-linked binary on glibc-based -// system, because a call to os/user.Current() in a static binary leads to -// segfault due to a glibc issue that won't be fixed in a short term. -// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) -func GetStatic() (string, error) { - uid := os.Getuid() - usr, err := idtools.LookupUID(uid) - if err != nil { - return "", err - } - return usr.Home, nil -} - // GetRuntimeDir returns XDG_RUNTIME_DIR. // XDG_RUNTIME_DIR is typically configured via pam_systemd. // GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index f0a363ded..67ab9e9b3 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -6,12 +6,6 @@ import ( "errors" ) -// GetStatic is not needed for non-linux systems. -// (Precisely, it is needed only for glibc-based linux systems.) -func GetStatic() (string, error) { - return "", errors.New("homedir.GetStatic() is not supported on this system") -} - // GetRuntimeDir is unsupported on non-linux system. func GetRuntimeDir() (string, error) { return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go index d85e12448..284e8be7c 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -4,8 +4,7 @@ package homedir // import "github.com/docker/docker/pkg/homedir" import ( "os" - - "github.com/opencontainers/runc/libcontainer/user" + "os/user" ) // Key returns the env var name for the user's home dir based on @@ -17,11 +16,13 @@ func Key() string { // Get returns the home directory of the current user with the help of // environment variables depending on the target operating system. // Returned path should be used with "path/filepath" to form new paths. +// If compiling statically, ensure the osusergo build tag is used. +// If needing to do nss lookups, do not compile statically. 
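As a quick illustration of the Generate helper added above, the following sketch builds the documented two-file archive and lists its entries (assuming the vendored package import path; the odd-length argument list exercises the empty-file case from the doc comment):

    package main

    import (
    	"archive/tar"
    	"fmt"
    	"io"

    	"github.com/docker/docker/pkg/archive"
    )

    func main() {
    	// Two complete name/content pairs plus one trailing name, which
    	// becomes an empty file, as the doc comment describes.
    	rdr, err := archive.Generate("foo.txt", "hello world", "emptyfile")
    	if err != nil {
    		panic(err)
    	}
    	tr := tar.NewReader(rdr)
    	for {
    		hdr, err := tr.Next()
    		if err == io.EOF {
    			break
    		}
    		if err != nil {
    			panic(err)
    		}
    		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
    	}
    }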
 func Get() string {
 	home := os.Getenv(Key())
 	if home == "" {
-		if u, err := user.CurrentUser(); err == nil {
-			return u.Home
+		if u, err := user.Current(); err == nil {
+			return u.HomeDir
 		}
 	}
 	return home
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
index fb239743a..3981ff64d 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -59,7 +59,7 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting
 			paths = append(paths, dirPath)
 		}
 	}
-	if err := system.MkdirAll(path, mode, ""); err != nil {
+	if err := system.MkdirAll(path, mode); err != nil {
 		return err
 	}
 	} else {
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
index 4ae38a1b1..35ede0fff 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
@@ -11,7 +11,7 @@ import (
 // Ownership is handled elsewhere, but in the future could be supported here
 // too.
 func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
-	if err := system.MkdirAll(path, mode, ""); err != nil {
+	if err := system.MkdirAll(path, mode); err != nil {
 		return err
 	}
 	return nil
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 000000000..6d6640898
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,283 @@
+package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage"
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/pkg/term"
+	units "github.com/docker/go-units"
+	"github.com/morikuni/aec"
+)
+
+// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
+// ensure the formatted time is always the same number of characters.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// JSONError wraps a concrete Code and Message: `Code` is an integer error
+// code, `Message` is the error message.
+type JSONError struct {
+	Code    int    `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+	return e.Message
+}
+
+// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
+// Start is the initial value for the operation. Current is the current status and
+// value of the progress made towards Total. Total is the end value describing when
+// we made 100% progress for an operation.
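The homedir change earlier in this hunk swaps libcontainer's user package for the standard library's os/user, keeping the same $HOME-first fallback. A self-contained sketch of that lookup for the Unix case (getHome is a hypothetical stand-in for homedir.Get):

    package main

    import (
    	"fmt"
    	"os"
    	"os/user"
    )

    // getHome mirrors homedir.Get on Unix: prefer $HOME, then fall back to
    // the user database via os/user (portable, and static-link friendly
    // when built with the osusergo tag).
    func getHome() string {
    	if home := os.Getenv("HOME"); home != "" {
    		return home
    	}
    	if u, err := user.Current(); err == nil {
    		return u.HomeDir
    	}
    	return ""
    }

    func main() {
    	fmt.Println(getHome())
    }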
+type JSONProgress struct { + terminalFd uintptr + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + Start int64 `json:"start,omitempty"` + // If true, don't show xB/yB + HideCounts bool `json:"hidecounts,omitempty"` + Units string `json:"units,omitempty"` + nowFunc func() time.Time + winSize int +} + +func (p *JSONProgress) String() string { + var ( + width = p.width() + pbBox string + numbersBox string + timeLeftBox string + ) + if p.Current <= 0 && p.Total <= 0 { + return "" + } + if p.Total <= 0 { + switch p.Units { + case "": + current := units.HumanSize(float64(p.Current)) + return fmt.Sprintf("%8v", current) + default: + return fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + if width > 110 { + // this number can't be negative gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + } + + switch { + case p.HideCounts: + case p.Units == "": // no units, use bytes + current := units.HumanSize(float64(p.Current)) + total := units.HumanSize(float64(p.Total)) + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + default: + numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := p.now().Sub(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +// shim for testing +func (p *JSONProgress) now() time.Time { + if p.nowFunc == nil { + p.nowFunc = func() time.Time { + return time.Now().UTC() + } + } + return p.nowFunc() +} + +// shim for testing +func (p *JSONProgress) width() int { + if p.winSize != 0 { + return p.winSize + } + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + return int(ws.Width) + } + return 200 +} + +// JSONMessage defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. It's used for docker events. +type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` //deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` //deprecated + // Aux contains out-of-band data, such as digests for push signing and image id after building. 
+ Aux *json.RawMessage `json:"aux,omitempty"` +} + +func clearLine(out io.Writer) { + eraseMode := aec.EraseModes.All + cl := aec.EraseLine(eraseMode) + fmt.Fprint(out, cl) +} + +func cursorUp(out io.Writer, l uint) { + fmt.Fprint(out, aec.Up(l)) +} + +func cursorDown(out io.Writer, l uint) { + fmt.Fprint(out, aec.Down(l)) +} + +// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the +// entire current line when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("authentication is required") + } + return jm.Error + } + var endl string + if isTerminal && jm.Stream == "" && jm.Progress != nil { + clearLine(out) + endl = "\r" + fmt.Fprintf(out, endl) + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && isTerminal { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { //deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]uint) + ) + + for { + var diff uint + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm) + } + continue + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. + line = uint(len(ids)) + ids[jm.ID] = line + if isTerminal { + fmt.Fprintf(out, "\n") + } + } + diff = uint(len(ids)) - line + if isTerminal { + cursorUp(out, diff) + } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. We + // don't want progress entries from some previous + // operation to be updated (for example, pull -a + // with multiple tags). 
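JSONMessage.Display, defined above, degrades gracefully on non-terminals: with isTerminal set to false it prints one plain line per status update instead of redrawing progress bars. A small sketch (the IDs and statuses are made up):

    package main

    import (
    	"os"

    	"github.com/docker/docker/pkg/jsonmessage"
    )

    func main() {
    	msgs := []jsonmessage.JSONMessage{
    		{ID: "abc123", Status: "Pulling fs layer"},
    		{ID: "abc123", Status: "Download complete"},
    	}
    	for _, jm := range msgs {
    		// isTerminal=false: no cursor tricks, just "id: status" lines.
    		if err := jm.Display(os.Stdout, false); err != nil {
    			panic(err)
    		}
    	}
    }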
+ ids = make(map[string]uint) + } + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal { + cursorDown(out, diff) + } + if err != nil { + return err + } + } + return nil +} + +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +// DisplayJSONMessagesToStream prints json messages to the output stream +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { + return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) +} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go index 3d68800eb..f3f46055e 100644 --- a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -587,9 +587,6 @@ var ( // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf "mestorf", - // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky - "minsky", - // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani "mirzakhani", @@ -737,9 +734,6 @@ var ( // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence "spence", - // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman - "stallman", - // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker "stonebraker", diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go index 6e599eebc..6a302dcee 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go @@ -9,7 +9,7 @@ import ( "os/exec" "strings" - "github.com/mattn/go-shellwords" + shellwords "github.com/mattn/go-shellwords" ) // GetKernelVersion gets the current kernel version. diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go index b7b15a1fd..a04763872 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -36,7 +36,7 @@ func GetKernelVersion() (*VersionInfo, error) { } KVI.kvi = blex - // Important - docker.exe MUST be manifested for this API to return + // Important - dockerd.exe MUST be manifested for this API to return // the correct information. 
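At the stream level, DisplayJSONMessagesStream (completed just above) decodes one JSON document per message, which is exactly the shape of a daemon's pull progress body. A usage sketch with a canned input (the IDs and statuses are illustrative):

    package main

    import (
    	"os"
    	"strings"

    	"github.com/docker/docker/pkg/jsonmessage"
    )

    func main() {
    	// Pre-encoded stream, one JSON document per message, shaped like a
    	// daemon's pull progress output.
    	in := strings.NewReader(
    		`{"id":"abc123","status":"Pulling fs layer"}` + "\n" +
    			`{"id":"abc123","status":"Pull complete"}` + "\n" +
    			`{"status":"Status: image is up to date"}` + "\n")
    	// terminalFd 0 with isTerminal=false: no cursor movement, just
    	// line-per-message output.
    	if err := jsonmessage.DisplayJSONMessagesStream(in, os.Stdout, 0, false, nil); err != nil {
    		panic(err)
    	}
    }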
dwVersion, err := windows.GetVersion() if err != nil { @@ -44,7 +44,7 @@ func GetKernelVersion() (*VersionInfo, error) { } KVI.major = int(dwVersion & 0xFF) - KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.minor = int((dwVersion & 0xFF00) >> 8) KVI.build = int((dwVersion & 0xFFFF0000) >> 16) return KVI, nil diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go deleted file mode 100644 index b2139b60e..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go +++ /dev/null @@ -1,14 +0,0 @@ -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "golang.org/x/sys/unix" -) - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go index 46339c282..3b978fd3b 100644 --- a/vendor/github.com/docker/docker/pkg/pools/pools.go +++ b/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -72,6 +72,7 @@ func (bp *bufferPool) Get() []byte { } func (bp *bufferPool) Put(b []byte) { + //nolint:staticcheck // TODO changing this to a pointer makes tests fail. Investigate if we should change or not (otherwise remove this TODO) bp.pool.Put(b) } diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go deleted file mode 100644 index adeb16305..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys.go +++ /dev/null @@ -1,67 +0,0 @@ -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "io/ioutil" - "os" - "path/filepath" -) - -// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. -func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { - return MkdirAll(path, perm, sddl) -} - -// MkdirAll creates a directory named path along with any necessary parents, -// with permission specified by attribute perm for all dir created. -func MkdirAll(path string, perm os.FileMode, sddl string) error { - return os.MkdirAll(path, perm) -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. -func IsAbs(path string) bool { - return filepath.IsAbs(path) -} - -// The functions below here are wrappers for the equivalents in the os and ioutils packages. -// They are passthrough on Unix platforms, and only relevant on Windows. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return os.Create(name) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return os.Open(name) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. 
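The filesys rewrite in progress here is the origin of the MkdirAll signature change threaded through this patch: the unused sddl argument disappears from the portable API, and the idtools callers shown earlier drop their trailing empty string. A caller-side sketch of the new two-argument form (the directory path is illustrative):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"

    	"github.com/docker/docker/pkg/system"
    )

    func main() {
    	dir := filepath.Join(os.TempDir(), "mkdirall-demo", "a", "b")
    	// New form: system.MkdirAll(path, perm). On Unix this is a
    	// passthrough to os.MkdirAll; on Windows it is volume-path aware.
    	if err := system.MkdirAll(dir, 0755); err != nil {
    		fmt.Println("mkdir failed:", err)
    		return
    	}
    	fmt.Println("created", dir)
    }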
-// If there is an error, it will be of type *PathError. -func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(name, flag, perm) -} - -// TempFileSequential creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func TempFileSequential(dir, prefix string) (f *os.File, err error) { - return ioutil.TempFile(dir, prefix) -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go new file mode 100644 index 000000000..dcee3e9f9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go @@ -0,0 +1,67 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return os.MkdirAll(path, perm) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os and ioutils packages. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// TempFileSequential creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. 
The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return ioutil.TempFile(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index 3049ff38a..7cebd6efc 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -26,9 +26,10 @@ func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { return mkdirall(path, true, sddl) } -// MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, _ os.FileMode, sddl string) error { - return mkdirall(path, false, sddl) +// MkdirAll implementation that is volume path aware for Windows. It can be used +// as a drop-in replacement for os.MkdirAll() +func MkdirAll(path string, _ os.FileMode) error { + return mkdirall(path, false, "") } // mkdirall is a custom version of os.MkdirAll modified for use on Windows diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go index d79e8b076..cd060eff2 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/docker/go-units" + units "github.com/docker/go-units" ) // ReadMemInfo retrieves memory statistics of the host system and returns a @@ -27,6 +27,7 @@ func ReadMemInfo() (*MemInfo, error) { func parseMemInfo(reader io.Reader) (*MemInfo, error) { meminfo := &MemInfo{} scanner := bufio.NewScanner(reader) + memAvailable := int64(-1) for scanner.Scan() { // Expected format: ["MemTotal:", "1234", "kB"] parts := strings.Fields(scanner.Text()) @@ -48,6 +49,8 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) { meminfo.MemTotal = bytes case "MemFree:": meminfo.MemFree = bytes + case "MemAvailable:": + memAvailable = bytes case "SwapTotal:": meminfo.SwapTotal = bytes case "SwapFree:": @@ -55,6 +58,9 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) { } } + if memAvailable != -1 { + meminfo.MemFree = memAvailable + } // Handle errors that may have occurred during the reading of the file. if err := scanner.Err(); err != nil { diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go index a3d957afa..64e892289 100644 --- a/vendor/github.com/docker/docker/pkg/system/path.go +++ b/vendor/github.com/docker/docker/pkg/system/path.go @@ -5,8 +5,6 @@ import ( "path/filepath" "runtime" "strings" - - "github.com/containerd/continuity/pathdriver" ) const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" @@ -27,6 +25,12 @@ func DefaultPathEnv(os string) string { } +// PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter +// actually uses in order to avoid system depending on containerd/continuity. +type PathVerifier interface { + IsAbs(string) bool +} + // CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, // is the system drive. // On Linux: this is a no-op. 
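The meminfo hunk just above makes ReadMemInfo prefer the kernel's MemAvailable estimate over raw MemFree when the field is present. A simplified parser showing the same precedence (the input is a trimmed-down /proc/meminfo; the real code converts units via go-units rather than hand-multiplying):

    package main

    import (
    	"bufio"
    	"fmt"
    	"strconv"
    	"strings"
    )

    // parse mirrors the MemAvailable handling added above: when the kernel
    // reports MemAvailable, prefer it over raw MemFree.
    func parse(meminfo string) (free int64) {
    	memAvailable := int64(-1)
    	sc := bufio.NewScanner(strings.NewReader(meminfo))
    	for sc.Scan() {
    		parts := strings.Fields(sc.Text())
    		if len(parts) != 3 || parts[2] != "kB" {
    			continue
    		}
    		kb, _ := strconv.ParseInt(parts[1], 10, 64)
    		switch parts[0] {
    		case "MemFree:":
    			free = kb * 1024
    		case "MemAvailable:":
    			memAvailable = kb * 1024
    		}
    	}
    	if memAvailable != -1 {
    		free = memAvailable
    	}
    	return free
    }

    func main() {
    	// Prints 524288: MemAvailable (512 kB) wins over MemFree (128 kB).
    	fmt.Println(parse("MemTotal: 2048 kB\nMemFree: 128 kB\nMemAvailable: 512 kB\n"))
    }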
@@ -42,7 +46,7 @@ func DefaultPathEnv(os string) string { // a --> a // /a --> \a // d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) { +func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { if runtime.GOOS != "windows" || LCOWSupported() { return path, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go index 98c9eb18d..17d5d131a 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -8,7 +8,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { mode: s.Mode, uid: s.Uid, gid: s.Gid, - rdev: s.Rdev, + // the type is 32bit on mips + rdev: uint64(s.Rdev), // nolint: unconvert mtim: s.Mtim}, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go deleted file mode 100644 index 756b92d1e..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go +++ /dev/null @@ -1,13 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go index 4ae92fa6c..eb19f9c85 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -55,7 +55,6 @@ var ( ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procGetProductInfo = modkernel32.NewProc("GetProductInfo") procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW") procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") ) @@ -85,7 +84,7 @@ type osVersionInfoEx struct { } // GetOSVersion gets the operating system version on Windows. Note that -// docker.exe must be manifested to get the correct version information. +// dockerd.exe must be manifested to get the correct version information. func GetOSVersion() OSVersion { var err error osv := OSVersion{} @@ -118,22 +117,6 @@ func IsWindowsClient() bool { return osviex.ProductType == verNTWorkstation } -// IsIoTCore returns true if the currently running image is based off of -// Windows 10 IoT Core. -// @engine maintainers - this function should not be removed or modified as it -// is used to enforce licensing restrictions on Windows. -func IsIoTCore() bool { - var returnedProductType uint32 - r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) - if r1 == 0 { - logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) - return false - } - const productIoTUAP = 0x0000007B - const productIoTUAPCommercial = 0x00000083 - return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial -} - // Unmount is a platform-specific helper function to call // the unmount syscall. 
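With the PathVerifier interface introduced above, CheckSystemDriveAndRemoveDriveLetter no longer needs containerd/continuity's PathDriver; any type with an IsAbs method will do. A sketch of a minimal verifier (absVerifier is a hypothetical name):

    package main

    import (
    	"fmt"
    	"path/filepath"

    	"github.com/docker/docker/pkg/system"
    )

    // absVerifier satisfies the new system.PathVerifier interface; IsAbs is
    // the only method the check consumes.
    type absVerifier struct{}

    func (absVerifier) IsAbs(p string) bool { return filepath.IsAbs(p) }

    func main() {
    	// On Windows this strips a system-drive letter and rejects other
    	// drives; on Linux it is a no-op and the path passes through.
    	out, err := system.CheckSystemDriveAndRemoveDriveLetter(`c:\somedir`, absVerifier{})
    	fmt.Println(out, err)
    }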
Not supported on Windows func Unmount(dest string) error { diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go deleted file mode 100644 index ed1b9fad5..000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ /dev/null @@ -1,24 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - var _path *byte - _path, err := unix.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go deleted file mode 100644 index 0afe85458..000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go +++ /dev/null @@ -1,25 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - atFdCwd := unix.AT_FDCWD - - var _path *byte - _path, err := unix.BytePtrFromString(path) - if err != nil { - return err - } - if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go new file mode 100644 index 000000000..61ba8c474 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go @@ -0,0 +1,24 @@ +// +build linux freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. 
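The two deleted per-OS LUtimesNano files collapse into the single utimes_unix.go above because golang.org/x/sys/unix now provides UtimesNanoAt on both Linux and FreeBSD. A standalone sketch of the same call (the symlink name and timestamp are illustrative; build-constrained like the vendored file):

    // +build linux freebsd

    package main

    import (
    	"fmt"
    	"os"
    	"time"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// Touch a symlink's own timestamps without following it, using the
    	// same UtimesNanoAt + AT_SYMLINK_NOFOLLOW call as the helper above.
    	if err := os.Symlink("missing-target", "lnk"); err != nil {
    		panic(err)
    	}
    	defer os.Remove("lnk")

    	when := time.Date(2019, 10, 24, 10, 37, 22, 0, time.UTC)
    	uts := []unix.Timespec{
    		unix.NsecToTimespec(when.UnixNano()), // atime
    		unix.NsecToTimespec(when.UnixNano()), // mtime
    	}
    	if err := unix.UtimesNanoAt(unix.AT_FDCWD, "lnk", uts, unix.AT_SYMLINK_NOFOLLOW); err != nil {
    		panic(err)
    	}
    	fi, _ := os.Lstat("lnk")
    	fmt.Println(fi.ModTime().UTC()) // 2019-10-24 10:37:22 +0000 UTC
    }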
+func LUtimesNano(path string, ts []syscall.Timespec) error { + uts := []unix.Timespec{ + unix.NsecToTimespec(syscall.TimespecToNsec(ts[0])), + unix.NsecToTimespec(syscall.TimespecToNsec(ts[1])), + } + err := unix.UtimesNanoAt(unix.AT_FDCWD, path, uts, unix.AT_SYMLINK_NOFOLLOW) + if err != nil && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go index a3c3db131..6e83b59e9 100644 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -7,7 +7,7 @@ import ( "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE "github.com/Azure/go-ansiterm/winterm" - "github.com/docker/docker/pkg/term/windows" + windowsconsole "github.com/docker/docker/pkg/term/windows" ) // State holds the console mode for the terminal. diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go index 3e5593ca6..7e8f265d4 100644 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go @@ -1,3 +1,4 @@ +// +build windows // These files implement ANSI-aware input and output streams for use by the Docker Windows client. // When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create // and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. @@ -9,7 +10,7 @@ import ( "os" "sync" - "github.com/Azure/go-ansiterm" + ansiterm "github.com/Azure/go-ansiterm" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json index 250a03e13..71ac412df 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/default.json +++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json @@ -167,6 +167,9 @@ "ioprio_set", "io_setup", "io_submit", + "io_uring_enter", + "io_uring_register", + "io_uring_setup", "ipc", "kill", "lchown", @@ -314,6 +317,7 @@ "sigaltstack", "signalfd", "signalfd4", + "sigprocmask", "sigreturn", "socket", "socketcall", diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go index 9f222a6ee..12721a120 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go @@ -143,20 +143,18 @@ Loop: } if call.Name != "" { - newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args)) - } - - for _, n := range call.Names { - newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(n, call.Action, call.Args)) + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args)) + } else { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args)) } } return newConfig, nil } -func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.LinuxSyscall { +func createSpecsSyscall(names []string, action types.Action, args []*types.Arg) specs.LinuxSyscall { newCall := specs.LinuxSyscall{ - Names: []string{name}, + Names: names, Action: specs.LinuxSeccompAction(action), } diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go 
b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go index 53333f43e..16148b408 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go @@ -160,6 +160,9 @@ func DefaultProfile() *types.Seccomp { "ioprio_set", "io_setup", "io_submit", + "io_uring_enter", + "io_uring_register", + "io_uring_setup", "ipc", "kill", "lchown", @@ -307,6 +310,7 @@ func DefaultProfile() *types.Seccomp { "sigaltstack", "signalfd", "signalfd4", + "sigprocmask", "sigreturn", "socket", "socketcall", diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitattributes b/vendor/github.com/fsouza/go-dockerclient/.gitattributes new file mode 100644 index 000000000..6313b56c5 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitignore b/vendor/github.com/fsouza/go-dockerclient/.gitignore index ef22245ea..5f6b48eae 100644 --- a/vendor/github.com/fsouza/go-dockerclient/.gitignore +++ b/vendor/github.com/fsouza/go-dockerclient/.gitignore @@ -1,4 +1,2 @@ # temporary symlink for testing testing/data/symlink -Gopkg.lock -vendor/ diff --git a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml new file mode 100644 index 000000000..aa3ab39e5 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml @@ -0,0 +1,29 @@ +run: + deadline: 5m + + skip-dirs: + - internal + +issues: + exclude-rules: + - path: _test\.go + linters: + - bodyclose + - goconst + - gosec + - scopelint + - path: testing[/\\].+\.go + linters: + - gosec + +linters: + enable-all: true + disable: + - dupl + - errcheck + - funlen + - gochecknoglobals + - gocognit + - goconst + - lll + - wsl diff --git a/vendor/github.com/fsouza/go-dockerclient/.travis.yml b/vendor/github.com/fsouza/go-dockerclient/.travis.yml index a02ed3f24..060f9e6a8 100644 --- a/vendor/github.com/fsouza/go-dockerclient/.travis.yml +++ b/vendor/github.com/fsouza/go-dockerclient/.travis.yml @@ -1,9 +1,8 @@ dist: xenial language: go go: - - 1.11.x - 1.12.x - - 1.13rc1 + - 1.13.x os: - linux - osx @@ -17,7 +16,6 @@ env: - GO111MODULE=on install: - travis-scripts/win-setup.bash - - make testdeps script: - travis_wait 25 travis-scripts/run-tests.bash services: diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS index a8ae99976..663410f95 100644 --- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS +++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS @@ -119,6 +119,7 @@ Kevin Xu Kim, Hirokuni Kostas Lekkas Kyle Allan +Kyle Quest Yunhee Lee Liron Levin Lior Yankovich diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile index 858adec1b..e0880ff67 100644 --- a/vendor/github.com/fsouza/go-dockerclient/Makefile +++ b/vendor/github.com/fsouza/go-dockerclient/Makefile @@ -1,34 +1,27 @@ .PHONY: \ all \ - staticcheck \ + lint \ fmt \ - fmtcheck \ pretest \ test \ integration all: test -staticcheck: - GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck - staticcheck ./... - -fmtcheck: - if [ -z "$${SKIP_FMT_CHECK}" ]; then [ -z "$$(gofumpt -s -d . | tee /dev/stderr)" ]; fi +lint: + cd /tmp && GO111MODULE=on go get github.com/golangci/golangci-lint/cmd/golangci-lint@latest + golangci-lint run fmt: GO111MODULE=off go get mvdan.cc/gofumpt gofumpt -s -w . 
-testdeps: - go mod download - -pretest: staticcheck fmtcheck +pretest: lint gotest: go test -race -vet all ./... -test: testdeps pretest gotest +test: pretest gotest integration: go test -tags docker_integration -run TestIntegration -v diff --git a/vendor/github.com/fsouza/go-dockerclient/README.md b/vendor/github.com/fsouza/go-dockerclient/README.md index f310ccc92..b7af7d0b9 100644 --- a/vendor/github.com/fsouza/go-dockerclient/README.md +++ b/vendor/github.com/fsouza/go-dockerclient/README.md @@ -118,7 +118,7 @@ All development commands can be seen in the [Makefile](Makefile). Commited code must pass: -* [staticcheck](https://staticcheck.io/) +* [golangci-lint](integration_unix_test.go) * [gofumpt](https://github.com/mvdan/gofumpt) * [go test](https://golang.org/cmd/go/#hdr-Test_packages) diff --git a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml index 793d88b7a..d9f374f50 100644 --- a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml +++ b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml @@ -6,11 +6,9 @@ environment: GOPATH: c:\gopath GOPROXY: https://proxy.golang.org GO111MODULE: on - SKIP_FMT_CHECK: 1 matrix: - - GOVERSION: "1.11.13" - - GOVERSION: "1.12.9" - - GOVERSION: "1.13rc1" + - GOVERSION: "1.12.10" + - GOVERSION: "1.13.1" install: - choco install make - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% @@ -18,8 +16,8 @@ install: - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.zip - 7z x go%GOVERSION%.windows-amd64.zip -y -oC:\ > NUL build_script: - - make testdeps + - make pretest test_script: - - make pretest gotest + - make gotest matrix: fast_finish: true diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go index 0062e5c5c..eb1a31716 100644 --- a/vendor/github.com/fsouza/go-dockerclient/auth.go +++ b/vendor/github.com/fsouza/go-dockerclient/auth.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "io/ioutil" + "net/http" "os" "path" "strings" @@ -219,7 +220,7 @@ func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) { if conf == nil { return authStatus, errors.New("conf is nil") } - resp, err := c.do("POST", "/auth", doOptions{data: conf}) + resp, err := c.do(http.MethodPost, "/auth", doOptions{data: conf}) if err != nil { return authStatus, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go index 6f394bfc1..825ba38ab 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client.go +++ b/vendor/github.com/fsouza/go-dockerclient/client.go @@ -32,8 +32,8 @@ import ( "time" "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/stdcopy" - "github.com/fsouza/go-dockerclient/internal/jsonmessage" ) const ( @@ -54,7 +54,9 @@ var ( ErrInactivityTimeout = errors.New("inactivity time exceeded timeout") apiVersion112, _ = NewAPIVersion("1.12") + apiVersion118, _ = NewAPIVersion("1.18") apiVersion119, _ = NewAPIVersion("1.19") + apiVersion121, _ = NewAPIVersion("1.21") apiVersion124, _ = NewAPIVersion("1.24") apiVersion125, _ = NewAPIVersion("1.25") apiVersion135, _ = NewAPIVersion("1.35") @@ -269,11 +271,12 @@ func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString stri // See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. 
// See https://github.com/moby/moby/blob/28d7dba41d0c0d9c7f0dafcc79d3c59f2b3f5dc3/client/options.go#L51 func NewClientFromEnv() (*Client, error) { - client, err := NewVersionedClientFromEnv(os.Getenv("DOCKER_API_VERSION")) + apiVersionString := os.Getenv("DOCKER_API_VERSION") + client, err := NewVersionedClientFromEnv(apiVersionString) if err != nil { return nil, err } - client.SkipServerVersionCheck = true + client.SkipServerVersionCheck = apiVersionString == "" return client, nil } @@ -397,7 +400,7 @@ func (c *Client) Ping() error { // See https://goo.gl/wYfgY1 for more details. func (c *Client) PingWithContext(ctx context.Context) error { path := "/_ping" - resp, err := c.do("GET", path, doOptions{context: ctx}) + resp, err := c.do(http.MethodGet, path, doOptions{context: ctx}) if err != nil { return err } @@ -409,7 +412,7 @@ func (c *Client) PingWithContext(ctx context.Context) error { } func (c *Client) getServerAPIVersionString() (version string, err error) { - resp, err := c.do("GET", "/version", doOptions{}) + resp, err := c.do(http.MethodGet, "/version", doOptions{}) if err != nil { return "", err } @@ -465,7 +468,7 @@ func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, e req.Header.Set("User-Agent", userAgent) if doOptions.data != nil { req.Header.Set("Content-Type", "application/json") - } else if method == "POST" { + } else if method == http.MethodPost { req.Header.Set("Content-Type", "plain/text") } @@ -520,7 +523,7 @@ func chooseError(ctx context.Context, err error) error { } func (c *Client) stream(method, path string, streamOptions streamOptions) error { - if (method == "POST" || method == "PUT") && streamOptions.in == nil { + if (method == http.MethodPost || method == http.MethodPut) && streamOptions.in == nil { streamOptions.in = bytes.NewReader(nil) } if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { @@ -529,11 +532,11 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error return err } } - return c.streamUrl(method, c.getURL(path), streamOptions) + return c.streamURL(method, c.getURL(path), streamOptions) } -func (c *Client) streamUrl(method, url string, streamOptions streamOptions) error { - if (method == "POST" || method == "PUT") && streamOptions.in == nil { +func (c *Client) streamURL(method, url string, streamOptions streamOptions) error { + if (method == http.MethodPost || method == http.MethodPut) && streamOptions.in == nil { streamOptions.in = bytes.NewReader(nil) } if !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { @@ -547,7 +550,7 @@ func (c *Client) streamUrl(method, url string, streamOptions streamOptions) erro return err } req.Header.Set("User-Agent", userAgent) - if method == "POST" { + if method == http.MethodPost { req.Header.Set("Content-Type", "plain/text") } for key, val := range streamOptions.headers { @@ -606,6 +609,7 @@ func (c *Client) streamUrl(method, url string, streamOptions streamOptions) erro return chooseError(subCtx, err) } + defer resp.Body.Close() } else { if resp, err = c.HTTPClient.Do(req.WithContext(subCtx)); err != nil { if strings.Contains(err.Error(), "connection refused") { @@ -613,11 +617,11 @@ func (c *Client) streamUrl(method, url string, streamOptions streamOptions) erro } return chooseError(subCtx, err) } + defer resp.Body.Close() if streamOptions.reqSent != nil { close(streamOptions.reqSent) } } - defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode >= 400 { return newError(resp) } @@ -776,9 +780,10 @@ func (c 
*Client) hijack(method, path string, hijackOptions hijackOptions) (Close errs := make(chan error, 1) quit := make(chan struct{}) go func() { - //lint:ignore SA1019 this is needed here + //nolint:staticcheck clientconn := httputil.NewClientConn(dial, nil) defer clientconn.Close() + //nolint:bodyclose clientconn.Do(req) if hijackOptions.success != nil { hijackOptions.success <- struct{}{} @@ -874,25 +879,26 @@ func (c *Client) getURL(path string) string { } func (c *Client) getPath(basepath string, opts interface{}) (string, error) { + queryStr, requiredAPIVersion := queryStringVersion(opts) + return c.pathVersionCheck(basepath, queryStr, requiredAPIVersion) +} + +func (c *Client) pathVersionCheck(basepath, queryStr string, requiredAPIVersion APIVersion) (string, error) { urlStr := strings.TrimRight(c.endpointURL.String(), "/") if c.endpointURL.Scheme == unixProtocol || c.endpointURL.Scheme == namedPipeProtocol { urlStr = "" } - queryStr, requiredAPIVersion := queryStringVersion(opts) - if c.requestedAPIVersion != nil { if c.requestedAPIVersion.GreaterThanOrEqualTo(requiredAPIVersion) { return fmt.Sprintf("%s/v%s%s?%s", urlStr, c.requestedAPIVersion, basepath, queryStr), nil - } else { - return "", fmt.Errorf("API %s requires version %s, requested version %s is insufficient", - basepath, requiredAPIVersion, c.requestedAPIVersion) } + return "", fmt.Errorf("API %s requires version %s, requested version %s is insufficient", + basepath, requiredAPIVersion, c.requestedAPIVersion) } if requiredAPIVersion != nil { return fmt.Sprintf("%s/v%s%s?%s", urlStr, requiredAPIVersion, basepath, queryStr), nil - } else { - return fmt.Sprintf("%s%s?%s", urlStr, basepath, queryStr), nil } + return fmt.Sprintf("%s%s?%s", urlStr, basepath, queryStr), nil } // getFakeNativeURL returns the URL needed to make an HTTP request over a UNIX @@ -922,7 +928,7 @@ func queryStringVersion(opts interface{}) (string, APIVersion) { if value.Kind() != reflect.Struct { return "", nil } - var apiVersion APIVersion = nil + var apiVersion APIVersion items := url.Values(map[string][]string{}) for i := 0; i < value.NumField(); i++ { field := value.Type().Field(i) @@ -1002,7 +1008,7 @@ func addQueryStringValue(items url.Values, key string, v reflect.Value) bool { if vLen > 0 { for i := 0; i < vLen; i++ { if addQueryStringValue(items, key, v.Index(i)) { - valuesAdded += 1 + valuesAdded++ } } } diff --git a/vendor/github.com/fsouza/go-dockerclient/client_windows.go b/vendor/github.com/fsouza/go-dockerclient/client_windows.go index 63d97ec65..731d5c962 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client_windows.go +++ b/vendor/github.com/fsouza/go-dockerclient/client_windows.go @@ -32,7 +32,8 @@ func (c *Client) initializeNativeClient(trFunc func() *http.Transport) { return } namedPipePath := c.endpointURL.Path - dialFunc := func(network, addr string) (net.Conn, error) { + //nolint:unparam + dialFunc := func(_, addr string) (net.Conn, error) { timeout := namedPipeConnectTimeout return winio.DialPipe(namedPipePath, &timeout) } diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go index 898646fbf..0a8ab361c 100644 --- a/vendor/github.com/fsouza/go-dockerclient/container.go +++ b/vendor/github.com/fsouza/go-dockerclient/container.go @@ -85,7 +85,7 @@ type NetworkList struct { // See https://goo.gl/kaOHGw for more details. func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) { path := "/containers/json?" 
+ queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } @@ -474,6 +474,12 @@ type Container struct { RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty" toml:"RestartCount,omitempty"` AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty" toml:"AppArmorProfile,omitempty"` + + MountLabel string `json:"MountLabel,omitempty" yaml:"MountLabel,omitempty" toml:"MountLabel,omitempty"` + ProcessLabel string `json:"ProcessLabel,omitempty" yaml:"ProcessLabel,omitempty" toml:"ProcessLabel,omitempty"` + Platform string `json:"Platform,omitempty" yaml:"Platform,omitempty" toml:"Platform,omitempty"` + SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty" toml:"SizeRw,omitempty"` + SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty" toml:"SizeRootFs,omitempty"` } // UpdateContainerOptions specify parameters to the UpdateContainer function. @@ -500,7 +506,7 @@ type UpdateContainerOptions struct { // // See https://goo.gl/Y6fXUy for more details. func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error { - resp, err := c.do("POST", fmt.Sprintf("/containers/"+id+"/update"), doOptions{ + resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+id+"/update"), doOptions{ data: opts, forceJSON: true, context: opts.Context, @@ -528,7 +534,7 @@ type RenameContainerOptions struct { // // See https://goo.gl/46inai for more details. func (c *Client) RenameContainer(opts RenameContainerOptions) error { - resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{ + resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{ context: opts.Context, }) if err != nil { @@ -549,13 +555,14 @@ func (c *Client) InspectContainer(id string) (*Container, error) { // The context object can be used to cancel the inspect request. // // See https://goo.gl/FaI5JT for more details. +//nolint:golint func (c *Client) InspectContainerWithContext(id string, ctx context.Context) (*Container, error) { return c.inspectContainer(id, doOptions{context: ctx}) } func (c *Client) inspectContainer(id string, opts doOptions) (*Container, error) { path := "/containers/" + id + "/json" - resp, err := c.do("GET", path, opts) + resp, err := c.do(http.MethodGet, path, opts) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchContainer{ID: id} @@ -575,7 +582,7 @@ func (c *Client) inspectContainer(id string, opts doOptions) (*Container, error) // See https://goo.gl/15KKzh for more details. func (c *Client) ContainerChanges(id string) ([]Change, error) { path := "/containers/" + id + "/changes" - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchContainer{ID: id} @@ -611,7 +618,7 @@ type CreateContainerOptions struct { func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) { path := "/containers/create?" 
+ queryString(opts) resp, err := c.do( - "POST", + http.MethodPost, path, doOptions{ data: struct { @@ -729,6 +736,7 @@ type HostConfig struct { Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty" toml:"Binds,omitempty"` CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty" toml:"CapAdd,omitempty"` CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty" toml:"CapDrop,omitempty"` + Capabilities []string `json:"Capabilities,omitempty" yaml:"Capabilities,omitempty" toml:"Capabilities,omitempty"` // Mutually exclusive w.r.t. CapAdd and CapDrop API v1.40 GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty" toml:"GroupAdd,omitempty"` ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty" toml:"ContainerIDFile,omitempty"` LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty" toml:"LxcConf,omitempty"` @@ -742,6 +750,8 @@ type HostConfig struct { UsernsMode string `json:"UsernsMode,omitempty" yaml:"UsernsMode,omitempty" toml:"UsernsMode,omitempty"` NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty" toml:"NetworkMode,omitempty"` IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty" toml:"IpcMode,omitempty"` + Isolation string `json:"Isolation,omitempty" yaml:"Isolation,omitempty" toml:"Isolation,omitempty"` // Windows only + ConsoleSize [2]int `json:"ConsoleSize,omitempty" yaml:"ConsoleSize,omitempty" toml:"ConsoleSize,omitempty"` // Windows only height x width PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty" toml:"PidMode,omitempty"` UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty" toml:"UTSMode,omitempty"` RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty" toml:"RestartPolicy,omitempty"` @@ -749,6 +759,7 @@ type HostConfig struct { DeviceCgroupRules []string `json:"DeviceCgroupRules,omitempty" yaml:"DeviceCgroupRules,omitempty" toml:"DeviceCgroupRules,omitempty"` LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty" toml:"LogConfig,omitempty"` SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty" toml:"SecurityOpt,omitempty"` + CgroupnsMode string `json:"CgroupnsMode,omitempty" yaml:"CgroupnsMode,omitempty" toml:"CgroupnsMode,omitempty"` // v1.40+ Cgroup string `json:"Cgroup,omitempty" yaml:"Cgroup,omitempty" toml:"Cgroup,omitempty"` CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty" toml:"CgroupParent,omitempty"` Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"` @@ -784,6 +795,8 @@ type HostConfig struct { IOMaximumBandwidth int64 `json:"IOMaximumBandwidth,omitempty" yaml:"IOMaximumBandwidth,omitempty"` IOMaximumIOps int64 `json:"IOMaximumIOps,omitempty" yaml:"IOMaximumIOps,omitempty"` Mounts []HostMount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"` + MaskedPaths []string `json:"MaskedPaths,omitempty" yaml:"MaskedPaths,omitempty" toml:"MaskedPaths,omitempty"` + ReadonlyPaths []string `json:"ReadonlyPaths,omitempty" yaml:"ReadonlyPaths,omitempty" toml:"ReadonlyPaths,omitempty"` Runtime string `json:"Runtime,omitempty" yaml:"Runtime,omitempty" toml:"Runtime,omitempty"` Init bool `json:",omitempty" yaml:",omitempty"` Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"` @@ -820,6 +833,7 @@ func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { // API 1.24 or greater. 
// // See https://goo.gl/fbOSZy for more details. +//nolint:golint func (c *Client) StartContainerWithContext(id string, hostConfig *HostConfig, ctx context.Context) error { return c.startContainer(id, hostConfig, doOptions{context: ctx}) } @@ -833,7 +847,7 @@ func (c *Client) startContainer(id string, hostConfig *HostConfig, opts doOption opts.data = hostConfig opts.forceJSON = true } - resp, err := c.do("POST", path, opts) + resp, err := c.do(http.MethodPost, path, opts) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchContainer{ID: id, Err: err} @@ -860,13 +874,14 @@ func (c *Client) StopContainer(id string, timeout uint) error { // container request. // // See https://goo.gl/R9dZcV for more details. +//nolint:golint func (c *Client) StopContainerWithContext(id string, timeout uint, ctx context.Context) error { return c.stopContainer(id, timeout, doOptions{context: ctx}) } func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error { path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout) - resp, err := c.do("POST", path, opts) + resp, err := c.do(http.MethodPost, path, opts) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchContainer{ID: id} @@ -886,7 +901,7 @@ func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error { // See https://goo.gl/MrAKQ5 for more details. func (c *Client) RestartContainer(id string, timeout uint) error { path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout) - resp, err := c.do("POST", path, doOptions{}) + resp, err := c.do(http.MethodPost, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchContainer{ID: id} @@ -902,7 +917,7 @@ func (c *Client) RestartContainer(id string, timeout uint) error { // See https://goo.gl/D1Yaii for more details. func (c *Client) PauseContainer(id string) error { path := fmt.Sprintf("/containers/%s/pause", id) - resp, err := c.do("POST", path, doOptions{}) + resp, err := c.do(http.MethodPost, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchContainer{ID: id} @@ -918,7 +933,7 @@ func (c *Client) PauseContainer(id string) error { // See https://goo.gl/sZ2faO for more details. 
func (c *Client) UnpauseContainer(id string) error { path := fmt.Sprintf("/containers/%s/unpause", id) - resp, err := c.do("POST", path, doOptions{}) + resp, err := c.do(http.MethodPost, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchContainer{ID: id} @@ -948,7 +963,7 @@ func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) { args = fmt.Sprintf("?ps_args=%s", psArgs) } path := fmt.Sprintf("/containers/%s/top%s", id, args) - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return result, &NoSuchContainer{ID: id} @@ -1116,7 +1131,7 @@ func (c *Client) Stats(opts StatsOptions) (retErr error) { reqSent := make(chan struct{}) go func() { defer close(errC) - err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{ + err := c.stream(http.MethodGet, fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{ rawJSONStream: true, useJSONDecoder: true, stdout: writeCloser, @@ -1184,7 +1199,7 @@ type KillContainerOptions struct { // See https://goo.gl/JnTxXZ for more details. func (c *Client) KillContainer(opts KillContainerOptions) error { path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { e, ok := err.(*Error) if !ok { @@ -1225,7 +1240,7 @@ type RemoveContainerOptions struct { // See https://goo.gl/hL5IPC for more details. func (c *Client) RemoveContainer(opts RemoveContainerOptions) error { path := "/containers/" + opts.ID + "?" + queryString(opts) - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchContainer{ID: opts.ID} @@ -1254,7 +1269,7 @@ type UploadToContainerOptions struct { func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error { url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) - return c.stream("PUT", url, streamOptions{ + return c.stream(http.MethodPut, url, streamOptions{ in: opts.InputStream, context: opts.Context, }) @@ -1277,7 +1292,7 @@ type DownloadFromContainerOptions struct { func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error { url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) - return c.stream("GET", url, streamOptions{ + return c.stream(http.MethodGet, url, streamOptions{ setRawTerminal: true, stdout: opts.OutputStream, inactivityTimeout: opts.InactivityTimeout, @@ -1310,7 +1325,7 @@ func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error { return errors.New("go-dockerclient: CopyFromContainer is no longer available in Docker >= 1.12, use DownloadFromContainer instead") } url := fmt.Sprintf("/containers/%s/copy", opts.Container) - resp, err := c.do("POST", url, doOptions{ + resp, err := c.do(http.MethodPost, url, doOptions{ data: opts, context: opts.Context, }) @@ -1338,12 +1353,13 @@ func (c *Client) WaitContainer(id string) (int, error) { // inspect request. // // See https://goo.gl/4AGweZ for more details. 
+//nolint:golint func (c *Client) WaitContainerWithContext(id string, ctx context.Context) (int, error) { return c.waitContainer(id, doOptions{context: ctx}) } func (c *Client) waitContainer(id string, opts doOptions) (int, error) { - resp, err := c.do("POST", "/containers/"+id+"/wait", opts) + resp, err := c.do(http.MethodPost, "/containers/"+id+"/wait", opts) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return 0, &NoSuchContainer{ID: id} @@ -1377,7 +1393,7 @@ type CommitContainerOptions struct { // See https://goo.gl/CzIguf for more details. func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) { path := "/commit?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.Run, context: opts.Context, }) @@ -1412,6 +1428,9 @@ type AttachToContainerOptions struct { // to unexpected behavior. Success chan struct{} + // Override the key sequence for detaching a container. + DetachKeys string + // Use raw terminal? Usually true when the container contains a TTY. RawTerminal bool `qs:"-"` @@ -1451,7 +1470,7 @@ func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (Cl return nil, &NoSuchContainer{ID: opts.Container} } path := "/containers/" + opts.Container + "/attach?" + queryString(opts) - return c.hijack("POST", path, hijackOptions{ + return c.hijack(http.MethodPost, path, hijackOptions{ success: opts.Success, setRawTerminal: opts.RawTerminal, in: opts.InputStream, @@ -1501,7 +1520,7 @@ func (c *Client) Logs(opts LogsOptions) error { opts.Tail = "all" } path := "/containers/" + opts.Container + "/logs?" + queryString(opts) - return c.stream("GET", path, streamOptions{ + return c.stream(http.MethodGet, path, streamOptions{ setRawTerminal: opts.RawTerminal, stdout: opts.OutputStream, stderr: opts.ErrorStream, @@ -1517,7 +1536,7 @@ func (c *Client) ResizeContainerTTY(id string, height, width int) error { params := make(url.Values) params.Set("h", strconv.Itoa(height)) params.Set("w", strconv.Itoa(width)) - resp, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{}) + resp, err := c.do(http.MethodPost, "/containers/"+id+"/resize?"+params.Encode(), doOptions{}) if err != nil { return err } @@ -1545,7 +1564,7 @@ func (c *Client) ExportContainer(opts ExportContainerOptions) error { return &NoSuchContainer{ID: opts.ID} } url := fmt.Sprintf("/containers/%s/export", opts.ID) - return c.stream("GET", url, streamOptions{ + return c.stream(http.MethodGet, url, streamOptions{ setRawTerminal: true, stdout: opts.OutputStream, inactivityTimeout: opts.InactivityTimeout, @@ -1574,7 +1593,7 @@ type PruneContainersResults struct { // See https://goo.gl/wnkgDT for more details. func (c *Client) PruneContainers(opts PruneContainersOptions) (*PruneContainersResults, error) { path := "/containers/prune?" 
+ queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/distribution.go b/vendor/github.com/fsouza/go-dockerclient/distribution.go index d0f8ce74c..6e5e12f7d 100644 --- a/vendor/github.com/fsouza/go-dockerclient/distribution.go +++ b/vendor/github.com/fsouza/go-dockerclient/distribution.go @@ -6,6 +6,7 @@ package docker import ( "encoding/json" + "net/http" "github.com/docker/docker/api/types/registry" ) @@ -13,7 +14,7 @@ import ( // InspectDistribution returns image digest and platform information by contacting the registry func (c *Client) InspectDistribution(name string) (*registry.DistributionInspect, error) { path := "/distribution/" + name + "/json" - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/fsouza/go-dockerclient/event.go index 3a3364d9d..6de7c5535 100644 --- a/vendor/github.com/fsouza/go-dockerclient/event.go +++ b/vendor/github.com/fsouza/go-dockerclient/event.go @@ -178,7 +178,7 @@ func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error { return nil } -func (eventState *eventMonitoringState) disableEventMonitoring() error { +func (eventState *eventMonitoringState) disableEventMonitoring() { eventState.Lock() defer eventState.Unlock() @@ -191,7 +191,6 @@ func (eventState *eventMonitoringState) disableEventMonitoring() error { close(eventState.C) close(eventState.errC) } - return nil } func (eventState *eventMonitoringState) monitorEvents(c *Client) { @@ -330,17 +329,18 @@ func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan if err != nil { return err } - //lint:ignore SA1019 this is needed here + //nolint:staticcheck conn := httputil.NewClientConn(dial, nil) - req, err := http.NewRequest("GET", uri, nil) + req, err := http.NewRequest(http.MethodGet, uri, nil) if err != nil { return err } + //nolint:bodyclose res, err := conn.Do(req) if err != nil { return err } - //lint:ignore SA1019 ClientConn is needed here + //nolint:staticcheck go func(res *http.Response, conn *httputil.ClientConn) { defer conn.Close() defer res.Body.Close() diff --git a/vendor/github.com/fsouza/go-dockerclient/exec.go b/vendor/github.com/fsouza/go-dockerclient/exec.go index d804b10b8..48d1ad349 100644 --- a/vendor/github.com/fsouza/go-dockerclient/exec.go +++ b/vendor/github.com/fsouza/go-dockerclient/exec.go @@ -30,6 +30,7 @@ type CreateExecOptions struct { Container string `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"` User string `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"` WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty" toml:"WorkingDir,omitempty"` + DetachKeys string `json:"DetachKeys,omitempty" yaml:"DetachKeys,omitempty" toml:"DetachKeys,omitempty"` Context context.Context `json:"-"` AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"` AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"` @@ -50,7 +51,7 @@ func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) { return nil, errors.New("exec configuration WorkingDir is only supported in API#1.35 and above") } path := 
fmt.Sprintf("/containers/%s/exec", opts.Container) - resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{data: opts, context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchContainer{ID: opts.Container} @@ -119,7 +120,7 @@ func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWa path := fmt.Sprintf("/exec/%s/start", id) if opts.Detach { - resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{data: opts, context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchExec{ID: id} @@ -130,7 +131,7 @@ func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWa return nil, nil } - return c.hijack("POST", path, hijackOptions{ + return c.hijack(http.MethodPost, path, hijackOptions{ success: opts.Success, setRawTerminal: opts.RawTerminal, in: opts.InputStream, @@ -151,7 +152,7 @@ func (c *Client) ResizeExecTTY(id string, height, width int) error { params.Set("w", strconv.Itoa(width)) path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode()) - resp, err := c.do("POST", path, doOptions{}) + resp, err := c.do(http.MethodPost, path, doOptions{}) if err != nil { return err } @@ -192,7 +193,7 @@ type ExecInspect struct { // See https://goo.gl/ctMUiW for more details func (c *Client) InspectExec(id string) (*ExecInspect, error) { path := fmt.Sprintf("/exec/%s/json", id) - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchExec{ID: id} diff --git a/vendor/github.com/fsouza/go-dockerclient/go.mod b/vendor/github.com/fsouza/go-dockerclient/go.mod index 90183e9d2..cb0ef690c 100644 --- a/vendor/github.com/fsouza/go-dockerclient/go.mod +++ b/vendor/github.com/fsouza/go-dockerclient/go.mod @@ -1,26 +1,26 @@ module github.com/fsouza/go-dockerclient -go 1.11 +go 1.12 require ( - github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 + github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Microsoft/go-winio v0.4.14 github.com/Microsoft/hcsshim v0.8.6 // indirect + github.com/containerd/containerd v1.3.0 // indirect github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b + github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 github.com/gogo/protobuf v1.2.1 // indirect github.com/golang/protobuf v1.3.0 // indirect github.com/google/go-cmp v0.3.1 github.com/gorilla/mux v1.7.3 - github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd + github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect github.com/opencontainers/go-digest v1.0.0-rc1 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opencontainers/runc v0.1.1 // indirect - golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 - golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 + golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad google.golang.org/grpc v1.22.0 // indirect gotest.tools v2.2.0+incompatible // indirect ) diff --git 
a/vendor/github.com/fsouza/go-dockerclient/go.sum b/vendor/github.com/fsouza/go-dockerclient/go.sum index 00c823418..c4df2e956 100644 --- a/vendor/github.com/fsouza/go-dockerclient/go.sum +++ b/vendor/github.com/fsouza/go-dockerclient/go.sum @@ -7,14 +7,16 @@ github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jB github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b h1:+Ga+YpCDpcY1fln6GI0fiiirpqHGcob5/Vk3oKNuGdU= -github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce h1:H3csZuxZESJeeEiOxq4YXPNmLFbjl7u2qVBrAAGX/sA= +github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -31,12 +33,12 @@ github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd h1:anPrsicrIi2ColgWTVPk+TrN42hJIWlfPHSBP9S0ZkM= -github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd/go.mod h1:3LVOLeyx9XVvwPgrt2be44XgSqndprz1G18rSk8KD84= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec 
v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= @@ -54,8 +56,8 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -67,9 +69,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 h1:6ZQFf1D2YYDDI7eSwW8adlkkavTB9sw5I24FVtEvNUQ= -golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= diff --git a/vendor/github.com/fsouza/go-dockerclient/image.go b/vendor/github.com/fsouza/go-dockerclient/image.go index 31b6c53f4..5f72d6645 100644 --- a/vendor/github.com/fsouza/go-dockerclient/image.go +++ b/vendor/github.com/fsouza/go-dockerclient/image.go @@ -109,7 +109,7 @@ type ListImagesOptions struct { // See https://goo.gl/BVzauZ for more details. func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) { path := "/images/json?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } @@ -129,13 +129,14 @@ type ImageHistory struct { Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Tags,omitempty"` CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty" toml:"CreatedBy,omitempty"` Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"` + Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty" toml:"Comment,omitempty"` } // ImageHistory returns the history of the image by its name or ID. // // See https://goo.gl/fYtxQa for more details. 
func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { - resp, err := c.do("GET", "/images/"+name+"/history", doOptions{}) + resp, err := c.do(http.MethodGet, "/images/"+name+"/history", doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, ErrNoSuchImage @@ -154,7 +155,7 @@ func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { // // See https://goo.gl/Vd2Pck for more details. func (c *Client) RemoveImage(name string) error { - resp, err := c.do("DELETE", "/images/"+name, doOptions{}) + resp, err := c.do(http.MethodDelete, "/images/"+name, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return ErrNoSuchImage @@ -181,7 +182,7 @@ type RemoveImageOptions struct { // See https://goo.gl/Vd2Pck for more details. func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error { uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts)) - resp, err := c.do("DELETE", uri, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, uri, doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return ErrNoSuchImage @@ -196,7 +197,7 @@ func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error // // See https://goo.gl/ncLTG8 for more details. func (c *Client) InspectImage(name string) (*Image, error) { - resp, err := c.do("GET", "/images/"+name+"/json", doOptions{}) + resp, err := c.do(http.MethodGet, "/images/"+name+"/json", doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, ErrNoSuchImage @@ -271,7 +272,7 @@ func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error name := opts.Name opts.Name = "" path := "/images/" + name + "/push?" + queryString(&opts) - return c.stream("POST", path, streamOptions{ + return c.stream(http.MethodPost, path, streamOptions{ setRawTerminal: true, rawJSONStream: opts.RawJSONStream, headers: headers, @@ -322,12 +323,13 @@ func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error return c.createImage(&opts, headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) } +//nolint:golint func (c *Client) createImage(opts interface{}, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error { url, err := c.getPath("/images/create", opts) if err != nil { return err } - return c.streamUrl("POST", url, streamOptions{ + return c.streamURL(http.MethodPost, url, streamOptions{ setRawTerminal: true, headers: headers, in: in, @@ -351,7 +353,7 @@ type LoadImageOptions struct { // // See https://goo.gl/rEsBV3 for more details. func (c *Client) LoadImage(opts LoadImageOptions) error { - return c.stream("POST", "/images/load", streamOptions{ + return c.stream(http.MethodPost, "/images/load", streamOptions{ setRawTerminal: true, in: opts.InputStream, stdout: opts.OutputStream, @@ -373,7 +375,7 @@ type ExportImageOptions struct { // // See https://goo.gl/AuySaA for more details. 
func (c *Client) ExportImage(opts ExportImageOptions) error { - return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{ + return c.stream(http.MethodGet, fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{ setRawTerminal: true, stdout: opts.OutputStream, inactivityTimeout: opts.InactivityTimeout, @@ -403,7 +405,7 @@ func (c *Client) ExportImages(opts ExportImagesOptions) error { var err error var exporturl string if c.requestedAPIVersion.GreaterThanOrEqualTo(apiVersion125) { - var str string = opts.Names[0] + str := opts.Names[0] for _, val := range opts.Names[1:] { str += "," + val } @@ -412,7 +414,6 @@ func (c *Client) ExportImages(opts ExportImagesOptions) error { OutputStream: opts.OutputStream, InactivityTimeout: opts.InactivityTimeout, Context: opts.Context, - }) } else { exporturl, err = c.getPath("/images/get", &opts) @@ -420,7 +421,7 @@ func (c *Client) ExportImages(opts ExportImagesOptions) error { if err != nil { return err } - return c.streamUrl("GET", exporturl, streamOptions{ + return c.streamURL(http.MethodGet, exporturl, streamOptions{ setRawTerminal: true, stdout: opts.OutputStream, inactivityTimeout: opts.InactivityTimeout, @@ -471,35 +472,39 @@ func (c *Client) ImportImage(opts ImportImageOptions) error { // https://goo.gl/4nYHwV. type BuildImageOptions struct { Context context.Context - Name string `qs:"t"` - Dockerfile string `qs:"dockerfile"` - CacheFrom []string `qs:"-"` - Memory int64 `qs:"memory"` - Memswap int64 `qs:"memswap"` - CPUShares int64 `qs:"cpushares"` - CPUQuota int64 `qs:"cpuquota"` - CPUPeriod int64 `qs:"cpuperiod"` - CPUSetCPUs string `qs:"cpusetcpus"` - Labels map[string]string `qs:"labels"` - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - Remote string `qs:"remote"` + Name string `qs:"t"` + Dockerfile string `ver:"1.25"` + ExtraHosts string `ver:"1.28"` + CacheFrom []string `qs:"-" ver:"1.25"` + Memory int64 + Memswap int64 + ShmSize int64 + CPUShares int64 + CPUQuota int64 `ver:"1.21"` + CPUPeriod int64 `ver:"1.21"` + CPUSetCPUs string + Labels map[string]string + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + Remote string Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header ContextDir string `qs:"-"` - Ulimits []ULimit `qs:"-"` - BuildArgs []BuildArg `qs:"-"` - NetworkMode string `qs:"networkmode"` + Ulimits []ULimit `qs:"-" ver:"1.18"` + BuildArgs []BuildArg `qs:"-" ver:"1.21"` + NetworkMode string `ver:"1.25"` + Platform string `ver:"1.32"` InactivityTimeout time.Duration `qs:"-"` - CgroupParent string `qs:"cgroupparent"` - SecurityOpt []string `qs:"securityopt"` - Target string `gs:"target"` - NoCache bool `qs:"nocache"` - SuppressOutput bool `qs:"q"` - Pull bool `qs:"pull"` - RmTmpContainer bool `qs:"rm"` - ForceRmTmpContainer bool `qs:"forcerm"` - RawJSONStream bool `qs:"-"` + CgroupParent string + SecurityOpt []string + Target string + Outputs string `ver:"1.40"` + NoCache bool + SuppressOutput bool `qs:"q"` + Pull bool `ver:"1.16"` + RmTmpContainer bool `qs:"rm"` + ForceRmTmpContainer bool `qs:"forcerm" ver:"1.12"` + RawJSONStream bool `qs:"-"` } // BuildArg represents arguments that can be passed to the image when building @@ -542,13 +547,16 @@ func (c *Client) BuildImage(opts BuildImageOptions) error { return err } } - qs := queryString(&opts) + qs, ver := queryStringVersion(&opts) - if c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion125) && len(opts.CacheFrom) > 
0 { + if len(opts.CacheFrom) > 0 { if b, err := json.Marshal(opts.CacheFrom); err == nil { item := url.Values(map[string][]string{}) item.Add("cachefrom", string(b)) qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + if ver == nil || apiVersion125.GreaterThan(ver) { + ver = apiVersion125 + } } } @@ -557,6 +565,9 @@ func (c *Client) BuildImage(opts BuildImageOptions) error { item := url.Values(map[string][]string{}) item.Add("ulimits", string(b)) qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + if ver == nil || apiVersion118.GreaterThan(ver) { + ver = apiVersion118 + } } } @@ -569,10 +580,18 @@ func (c *Client) BuildImage(opts BuildImageOptions) error { item := url.Values(map[string][]string{}) item.Add("buildargs", string(b)) qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + if ver == nil || apiVersion121.GreaterThan(ver) { + ver = apiVersion121 + } } } - return c.stream("POST", fmt.Sprintf("/build?%s", qs), streamOptions{ + buildURL, err := c.pathVersionCheck("/build", qs, ver) + if err != nil { + return err + } + + return c.streamURL(http.MethodPost, buildURL, streamOptions{ setRawTerminal: true, rawJSONStream: opts.RawJSONStream, headers: headers, @@ -610,7 +629,7 @@ func (c *Client) TagImage(name string, opts TagImageOptions) error { if name == "" { return ErrNoSuchImage } - resp, err := c.do("POST", "/images/"+name+"/tag?"+queryString(&opts), doOptions{ + resp, err := c.do(http.MethodPost, "/images/"+name+"/tag?"+queryString(&opts), doOptions{ context: opts.Context, }) if err != nil { @@ -666,7 +685,7 @@ type APIImageSearch struct { // // See https://goo.gl/KLO9IZ for more details. func (c *Client) SearchImages(term string) ([]APIImageSearch, error) { - resp, err := c.do("GET", "/images/search?term="+term, doOptions{}) + resp, err := c.do(http.MethodGet, "/images/search?term="+term, doOptions{}) if err != nil { return nil, err } @@ -687,7 +706,7 @@ func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImage return nil, err } - resp, err := c.do("GET", "/images/search?term="+term, doOptions{ + resp, err := c.do(http.MethodGet, "/images/search?term="+term, doOptions{ headers: headers, }) if err != nil { @@ -725,7 +744,7 @@ type PruneImagesResults struct { // See https://goo.gl/qfZlbZ for more details. func (c *Client) PruneImages(opts PruneImagesOptions) (*PruneImagesResults, error) { path := "/images/prune?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go deleted file mode 100644 index f11ee0ee3..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go +++ /dev/null @@ -1,509 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package archive - -import ( - "archive/tar" - "bufio" - "compress/gzip" - "fmt" - "io" - "log" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" -) - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Bzip2 is bzip2 compression algorithm. - Bzip2 - // Gzip is gzip compression algorithm. 
- Gzip - // Xz is xz compression algorithm. - Xz -) - -const ( - modeISDIR = 040000 // Directory - modeISFIFO = 010000 // FIFO - modeISREG = 0100000 // Regular file - modeISLNK = 0120000 // Symbolic link - modeISBLK = 060000 // Block special file - modeISCHR = 020000 // Character special file - modeISSOCK = 0140000 // Socket -) - -// Compression is the state represents if compressed or not. -type Compression int - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -// WhiteoutFormat is the format of whiteouts unpacked -type WhiteoutFormat int - -// TarOptions wraps the tar options. -type TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ChownOpts *idtools.Identity - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - NoLchown bool - InUserNS bool - IncludeSourceDir bool - NoOverwriteDirNonDir bool -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath = fixVolumePathPrefix(srcPath) - - pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := newTarAppender( - idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), - compressWriter, - options.ChownOpts, - ) - ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - log.Printf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - log.Printf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - log.Printf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. 
- if len(options.IncludeFiles) > 0 { - log.Print("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range options.IncludeFiles { - include := include - rebaseName := options.RebaseNames[include] - - walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - log.Printf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = pm.Matches(relFilePath) - if err != nil { - log.Printf("Error matching %s: %v", relFilePath, err) - return err - } - } - - if skip { - // If we want to skip this file and its a directory - // then we should first check to see if there's an - // excludes pattern (e.g. !dir/file) that starts with this - // dir. If so then we can't skip this dir. - - // Its not a dir then so we can just return/skip. - if !f.IsDir() { - return nil - } - - // No exceptions (!...) in patterns so just skip dir - if !pm.Exclusions() { - return filepath.SkipDir - } - - dirSlash := relFilePath + string(filepath.Separator) - - for _, pat := range pm.Patterns() { - if !pat.Exclusion() { - continue - } - if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { - // found a match - so can't skip this dir - return nil - } - } - - // No matching exclusion dir so just skip dir - return filepath.SkipDir - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - log.Printf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it - if err == io.ErrClosedPipe { - return err - } - } - return nil - }) - } - }() - - return pipeReader, nil -} - -// CompressStream compresses the dest with specified compression algorithm. 
-func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - //lint:ignore ST1005 this is vendored/copied code - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - //lint:ignore ST1005 this is vendored/copied code - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) - ConvertRead(*tar.Header, string) (bool, error) -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - IdentityMapping *idtools.IdentityMapping - ChownOpts *idtools.Identity - - // For packing and unpacking whiteout files in the - // non standard format. The whiteout files defined - // by the AUFS standard are used as the tar whiteout - // standard. - WhiteoutConverter tarWhiteoutConverter -} - -func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { - return &tarAppender{ - SeenFiles: make(map[uint64]string), - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - IdentityMapping: idMapping, - ChownOpts: chownOpts, - } -} - -// addTarFile adds to the tar archive a file from `path` as `name` -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return err - } - } - - hdr, err := FileInfoHeader(name, fi, link) - if err != nil { - return err - } - if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hard linked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - inode, err := getInodeFromStat(fi.Sys()) - if err != nil { - return err - } - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - // check whether the file is overlayfs whiteout - // if yes, skip re-mapping container ID mappings. - isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 - - // handle re-mapping container ID mappings back to host ID mappings before - // writing tar headers/files. 
We skip whiteout files because they were written - // by the kernel and already have proper ownership relative to the host - if !isOverlayWhiteout && - !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && - !ta.IdentityMapping.Empty() { - fileIdentity, err := getFileIdentity(fi.Sys()) - if err != nil { - return err - } - hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIdentity) - if err != nil { - return err - } - } - - // explicitly override with ChownOpts - if ta.ChownOpts != nil { - hdr.Uid = ta.ChownOpts.UID - hdr.Gid = ta.ChownOpts.GID - } - - if ta.WhiteoutConverter != nil { - wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) - if err != nil { - return err - } - - // If a new whiteout file exists, write original hdr, then - // replace hdr with wo to be written after. Whiteouts should - // always be written after the original. Note the original - // hdr may have been updated to be a whiteout with returning - // a whiteout header - if wo != nil { - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - return fmt.Errorf("tar: cannot use whiteout for non-empty file") - } - hdr = wo - } - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use system.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - // On Linux, this equates to a regular os.Open. - file, err := system.OpenSequential(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - //lint:ignore SA1019 this is vendored/copied code - hdr.Xattrs = make(map[string]string) - //lint:ignore SA1019 this is vendored/copied code - hdr.Xattrs["security.capability"] = string(capability) - } - return nil -} - -// FileInfoHeader creates a populated Header from fi. -// Compared to archive pkg this function fills in more information. -// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), -// which have been deleted since Go 1.9 archive/tar. 
-func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return nil, err - } - hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { - return nil, err - } - return hdr, nil -} - -// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar -// https://github.com/golang/go/commit/66b5a2f -func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { - fm := fi.Mode() - switch { - case fm.IsRegular(): - mode |= modeISREG - case fi.IsDir(): - mode |= modeISDIR - case fm&os.ModeSymlink != 0: - mode |= modeISLNK - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - mode |= modeISCHR - } else { - mode |= modeISBLK - } - case fm&os.ModeNamedPipe != 0: - mode |= modeISFIFO - case fm&os.ModeSocket != 0: - mode |= modeISSOCK - } - return mode -} - -// canonicalTarName provides a platform-independent and consistent posix-style -// path for files and directories to be archived regardless of the platform. -func canonicalTarName(name string, isDir bool) (string, error) { - name, err := CanonicalTarNameForPath(name) - if err != nil { - return "", err - } - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go deleted file mode 100644 index e2059e489..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package archive - -import ( - "archive/tar" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -const ( - // AUFSWhiteoutFormat is the default format for whiteouts - AUFSWhiteoutFormat WhiteoutFormat = iota - // OverlayWhiteoutFormat formats whiteout according to the overlay - // standard. 
- OverlayWhiteoutFormat -) - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - if format == OverlayWhiteoutFormat { - return overlayWhiteoutConverter{} - } - return nil -} - -type overlayWhiteoutConverter struct{} - -func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { - // convert whiteouts to AUFS format - if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { - // we just rename the file and make it normal - dir, filename := filepath.Split(hdr.Name) - hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 - hdr.Typeflag = tar.TypeReg - hdr.Size = 0 - } - - if fi.Mode()&os.ModeDir != 0 { - // convert opaque dirs to AUFS format by writing an empty file with the prefix - opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") - if err != nil { - return nil, err - } - if len(opaque) == 1 && opaque[0] == 'y' { - //lint:ignore SA1019 this is vendored/copied code - if hdr.Xattrs != nil { - //lint:ignore SA1019 this is vendored/copied code - delete(hdr.Xattrs, "trusted.overlay.opaque") - } - - // create a header for the whiteout file - // it should inherit some properties from the parent, but be a regular file - wo = &tar.Header{ - Typeflag: tar.TypeReg, - Mode: hdr.Mode & int64(os.ModePerm), - Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), - Size: 0, - Uid: hdr.Uid, - Uname: hdr.Uname, - Gid: hdr.Gid, - Gname: hdr.Gname, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - } - } - - return -} - -func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { - base := filepath.Base(path) - dir := filepath.Dir(path) - - // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay - if base == WhiteoutOpaqueDir { - err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) - // don't write the file itself - return false, err - } - - // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, WhiteoutPrefix) { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { - return false, err - } - if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { - return false, err - } - - // don't write the file itself - return false, nil - } - - return true, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go deleted file mode 100644 index 72822c857..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -// +build !linux - -package archive - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go deleted file mode 100644 index bb6bf7145..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -// +build !windows - -package archive - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/pkg/idtools" - "golang.org/x/sys/unix" -) - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - return p, nil // already unix-style -} - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. -func getWalkRoot(srcPath string, include string) string { - return srcPath + string(filepath.Separator) + include -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - inode = s.Ino - } - - return -} - -func getFileIdentity(stat interface{}) (idtools.Identity, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t") - } - return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil -} - -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - // Currently go does not fill in the major/minors - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert - hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert - } - } - - return -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go deleted file mode 100644 index 33c1dff03..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package archive - -import ( - "archive/tar" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/longpath" -) - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - // windows: convert windows style relative path with backslashes - // into forward slashes. 
Since windows does not allow '/' or '\' - // in file names, it is mostly safe to replace however we must - // check just in case - if strings.Contains(p, "/") { - //lint:ignore ST1005 Windows should be capitalized :) - return "", fmt.Errorf("Windows path contains forward slash: %s", p) - } - return strings.Replace(p, string(os.PathSeparator), "/", -1), nil -} - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return longpath.AddPrefix(srcPath) -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Inode in stat on Windows - return -} - -func getFileIdentity(stat interface{}) (idtools.Identity, error) { - // no notion of file ownership mapping yet on Windows - return idtools.Identity{}, nil -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) - permPart := perm & os.ModePerm - noPermPart := perm &^ os.ModePerm - // Add the x bit: make everything +x from windows - permPart |= 0111 - permPart &= 0755 - - return noPermPart | permPart -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - // do nothing. no notion of Rdev, Nlink in stat on Windows - return -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go deleted file mode 100644 index 39ea287bf..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -// +build !windows - -package archive - -import ( - "os" - "syscall" -) - -func hasHardlinks(fi os.FileInfo) bool { - return fi.Sys().(*syscall.Stat_t).Nlink > 1 -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go deleted file mode 100644 index a93130474..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package archive - -import "os" - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go deleted file mode 100644 index 45d45f20e..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. 
- -package archive - -import ( - "os" - "path/filepath" -) - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. -func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(filepath.FromSlash(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(os.PathSeparator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go deleted file mode 100644 index a61c22a08..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package archive - -// Whiteouts are files with a special meaning for the layered filesystem. -// Docker uses AUFS whiteout files inside exported archives. In other -// filesystems these files are generated/handled on tar creation/extraction. - -// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a -// filename this means that file has been removed from the base layer. -const WhiteoutPrefix = ".wh." - -// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for removing an actual file. Normally these files are excluded from exported -// archives. -const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix - -// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other -// layers. Normally these should not go into exported archives and all changed -// hardlinks should be copied to the top layer. -const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" - -// WhiteoutOpaqueDir file means directory has been made opaque - meaning -// readdir calls to this directory do not follow to lower layers. -const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go b/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go deleted file mode 100644 index 99a32ae05..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package jsonmessage - -import ( - "encoding/json" - "fmt" - "io" - "os" - "strings" - "time" - - units "github.com/docker/go-units" - "github.com/fsouza/go-dockerclient/internal/term" - gotty "github.com/ijc/Gotty" -) - -// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to -// ensure the formatted time is always the same number of characters. -const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - -// JSONError wraps a concrete Code and Message, `Code` -// is an integer error code, `Message` is the error message.
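The trailing "." preservation in SplitPathDirEntry above is easy to misread, so here is a standalone sketch of its behavior on a Unix host, with the helper inlined from the deleted code:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Inlined copy of SplitPathDirEntry/specifiesCurrentDir from above, so this
// sketch runs standalone. Semantics shown assume Unix path separators.
func splitPathDirEntry(p string) (dir, base string) {
	cleaned := filepath.Clean(filepath.FromSlash(p))
	if filepath.Base(p) == "." {
		cleaned += string(os.PathSeparator) + "."
	}
	return filepath.Dir(cleaned), filepath.Base(cleaned)
}

func main() {
	// A trailing "." is preserved as the base name.
	fmt.Println(splitPathDirEntry("/var/lib/docker/.")) // /var/lib/docker .
	// A bare trailing separator is cleaned away.
	fmt.Println(splitPathDirEntry("/var/lib/docker/")) // /var/lib docker
}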
-type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, -// Start is the initial value for the operation. Current is the current status and -// value of the progress made towards Total. Total is the end value describing when -// we made 100% progress for an operation. -type JSONProgress struct { - terminalFd uintptr - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` - // If true, don't show xB/yB - HideCounts bool `json:"hidecounts,omitempty"` - Units string `json:"units,omitempty"` - nowFunc func() time.Time - winSize int -} - -func (p *JSONProgress) String() string { - var ( - width = p.width() - pbBox string - numbersBox string - timeLeftBox string - ) - if p.Current <= 0 && p.Total <= 0 { - return "" - } - if p.Total <= 0 { - switch p.Units { - case "": - current := units.HumanSize(float64(p.Current)) - return fmt.Sprintf("%8v", current) - default: - return fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negative gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - - switch { - case p.HideCounts: - case p.Units == "": // no units, use bytes - current := units.HumanSize(float64(p.Current)) - total := units.HumanSize(float64(p.Total)) - - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) - } - default: - numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := p.now().Sub(time.Unix(p.Start, 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -// shim for testing -func (p *JSONProgress) now() time.Time { - if p.nowFunc == nil { - p.nowFunc = func() time.Time { - return time.Now().UTC() - } - } - return p.nowFunc() -} - -// shim for testing -func (p *JSONProgress) width() int { - if p.winSize != 0 { - return p.winSize - } - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - return int(ws.Width) - } - return 200 -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. 
-type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` // deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` // deprecated - // Aux contains out-of-band data, such as digests for push signing and image id after building. - Aux *json.RawMessage `json:"aux,omitempty"` -} - -/* Satisfied by gotty.TermInfo as well as noTermInfo from below */ -type termInfo interface { - Parse(attr string, params ...interface{}) (string, error) -} - -type noTermInfo struct{} // canary used when no terminfo. - -func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) { - return "", fmt.Errorf("noTermInfo") -} - -func clearLine(out io.Writer, ti termInfo) error { - // el2 (clear whole line) is not exposed by terminfo. - - // First clear line from beginning to cursor - if attr, err := ti.Parse("el1"); err == nil { - _, err = fmt.Fprintf(out, "%s", attr) - if err != nil { - return err - } - } else { - _, err := fmt.Fprintf(out, "\x1b[1K") - if err != nil { - return err - } - } - // Then clear line from cursor to end - if attr, err := ti.Parse("el"); err == nil { - _, err = fmt.Fprintf(out, "%s", attr) - if err != nil { - return err - } - } else { - _, err := fmt.Fprintf(out, "\x1b[K") - if err != nil { - return err - } - } - - return nil -} - -func cursorUp(out io.Writer, ti termInfo, l int) error { - if l == 0 { // Should never be the case, but be tolerant - return nil - } - if attr, err := ti.Parse("cuu", l); err == nil { - _, err = fmt.Fprintf(out, "%s", attr) - if err != nil { - return err - } - } else { - _, err := fmt.Fprintf(out, "\x1b[%dA", l) - if err != nil { - return err - } - } - return nil -} - -func cursorDown(out io.Writer, ti termInfo, l int) error { - if l == 0 { // Should never be the case, but be tolerant - return nil - } - if attr, err := ti.Parse("cud", l); err == nil { - _, err = fmt.Fprintf(out, "%s", attr) - if err != nil { - return err - } - } else { - _, err := fmt.Fprintf(out, "\x1b[%dB", l) - if err != nil { - return err - } - } - - return nil -} - -// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out` -// is a terminal. If this is the case, it will erase the entire current line -// when displaying the progressbar. 
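Before the Display logic below, it helps to see what a decoded element of the engine's progress stream looks like. A minimal sketch; the field values are illustrative, and the JSON shapes follow the struct tags above:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down mirrors of the JSONMessage/JSONProgress fields exercised here.
type progress struct {
	Current int64 `json:"current,omitempty"`
	Total   int64 `json:"total,omitempty"`
}

type message struct {
	Status   string    `json:"status,omitempty"`
	Progress *progress `json:"progressDetail,omitempty"`
	ID       string    `json:"id,omitempty"`
}

func main() {
	line := `{"status":"Downloading","progressDetail":{"current":3,"total":10},"id":"8f7c85c2269a"}`
	var m message
	if err := json.Unmarshal([]byte(line), &m); err != nil {
		panic(err)
	}
	fmt.Println(m.Status, m.ID, m.Progress.Current, m.Progress.Total) // Downloading 8f7c85c2269a 3 10
}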
-func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("authentication is required") - } - return jm.Error - } - var endl string - if termInfo != nil && jm.Stream == "" && jm.Progress != nil { - clearLine(out, termInfo) - endl = "\r" - _, err := fmt.Fprint(out, endl) - if err != nil { - return err - } - } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - _, err := fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) - if err != nil { - return err - } - } else if jm.Time != 0 { - _, err := fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) - if err != nil { - return err - } - } - if jm.ID != "" { - _, err := fmt.Fprintf(out, "%s: ", jm.ID) - if err != nil { - return err - } - } - if jm.From != "" { - _, err := fmt.Fprintf(out, "(from %s) ", jm.From) - if err != nil { - return err - } - } - if jm.Progress != nil && termInfo != nil { - _, err := fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - if err != nil { - return err - } - } else if jm.ProgressMessage != "" { // deprecated - _, err := fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - if err != nil { - return err - } - } else if jm.Stream != "" { - _, err := fmt.Fprintf(out, "%s%s", jm.Stream, endl) - if err != nil { - return err - } - } else { - _, err := fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - if err != nil { - return err - } - } - return nil -} - -// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` -// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of -// each line and move the cursor while displaying. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]int) - ) - - var termInfo termInfo - - if isTerminal { - term := os.Getenv("TERM") - if term == "" { - term = "vt102" - } - - var err error - if termInfo, err = gotty.OpenTermInfo(term); err != nil { - termInfo = &noTermInfo{} - } - } - - for { - diff := 0 - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. - line = len(ids) - ids[jm.ID] = line - if termInfo != nil { - _, err := fmt.Fprintf(out, "\n") - if err != nil { - return err - } - } - } - diff = len(ids) - line - if termInfo != nil { - if err := cursorUp(out, termInfo, diff); err != nil { - return err - } - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). 
- ids = make(map[string]int) - } - err := jm.Display(out, termInfo) - if jm.ID != "" && termInfo != nil { - if err := cursorDown(out, termInfo, diff); err != nil { - return err - } - } - if err != nil { - return err - } - } - return nil -} - -type stream interface { - io.Writer - FD() uintptr - IsTerminal() bool -} - -// DisplayJSONMessagesToStream prints json messages to the output stream -func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { - return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go deleted file mode 100644 index 7d3c11358..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package term - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go deleted file mode 100644 index 92a80a308..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -// +build !windows - -package term - -import "golang.org/x/sys/unix" - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - ws := &Winsize{Height: uws.Row, Width: uws.Col} - return ws, err -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go deleted file mode 100644 index 4a07a5d19..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package term - -import "github.com/Azure/go-ansiterm/winterm" - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/misc.go b/vendor/github.com/fsouza/go-dockerclient/misc.go index 01fd1f687..d42a66df6 100644 --- a/vendor/github.com/fsouza/go-dockerclient/misc.go +++ b/vendor/github.com/fsouza/go-dockerclient/misc.go @@ -8,6 +8,7 @@ import ( "context" "encoding/json" "net" + "net/http" "strings" "github.com/docker/docker/api/types/swarm" @@ -22,7 +23,7 @@ func (c *Client) Version() (*Env, error) { // VersionWithContext returns version information about the docker server. 
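The go-dockerclient hunks from here on mostly apply one mechanical change: bare HTTP method strings become the net/http constants. The constants are equal to the old literals, so behavior is unchanged; a trivial check:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// The constants this patch swaps in for bare "GET"/"POST"/"DELETE" strings.
	fmt.Println(http.MethodGet == "GET", http.MethodPost == "POST", http.MethodDelete == "DELETE") // true true true
}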
func (c *Client) VersionWithContext(ctx context.Context) (*Env, error) { - resp, err := c.do("GET", "/version", doOptions{context: ctx}) + resp, err := c.do(http.MethodGet, "/version", doOptions{context: ctx}) if err != nil { return nil, err } @@ -37,6 +38,7 @@ func (c *Client) VersionWithContext(ctx context.Context) (*Env, error) { // DockerInfo contains information about the Docker server // // See https://goo.gl/bHUoz9 for more details. +//nolint:golint type DockerInfo struct { ID string Containers int @@ -162,7 +164,7 @@ type IndexInfo struct { // // See https://goo.gl/ElTHi2 for more details. func (c *Client) Info() (*DockerInfo, error) { - resp, err := c.do("GET", "/info", doOptions{}) + resp, err := c.do(http.MethodGet, "/info", doOptions{}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/fsouza/go-dockerclient/network.go index 2331e08bf..3a06a52d5 100644 --- a/vendor/github.com/fsouza/go-dockerclient/network.go +++ b/vendor/github.com/fsouza/go-dockerclient/network.go @@ -48,7 +48,7 @@ type Endpoint struct { // // See https://goo.gl/6GugX3 for more details. func (c *Client) ListNetworks() ([]Network, error) { - resp, err := c.do("GET", "/networks", doOptions{}) + resp, err := c.do(http.MethodGet, "/networks", doOptions{}) if err != nil { return nil, err } @@ -75,7 +75,7 @@ func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error) qs := make(url.Values) qs.Add("filters", string(params)) path := "/networks?" + qs.Encode() - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { return nil, err } @@ -92,7 +92,7 @@ func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error) // See https://goo.gl/6GugX3 for more details. func (c *Client) NetworkInfo(id string) (*Network, error) { path := "/networks/" + id - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchNetwork{ID: id} @@ -159,7 +159,7 @@ type IPAMConfig struct { // See https://goo.gl/6GugX3 for more details. func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { resp, err := c.do( - "POST", + http.MethodPost, "/networks/create", doOptions{ data: opts, @@ -193,7 +193,7 @@ func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { // // See https://goo.gl/6GugX3 for more details. func (c *Client) RemoveNetwork(id string) error { - resp, err := c.do("DELETE", "/networks/"+id, doOptions{}) + resp, err := c.do(http.MethodDelete, "/networks/"+id, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchNetwork{ID: id} @@ -253,7 +253,7 @@ type EndpointIPAMConfig struct { // // See https://goo.gl/6GugX3 for more details. func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error { - resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{ + resp, err := c.do(http.MethodPost, "/networks/"+id+"/connect", doOptions{ data: opts, context: opts.Context, }) @@ -272,7 +272,7 @@ func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error // // See https://goo.gl/6GugX3 for more details. 
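A second recurring pattern in these hunks: an engine 404 is surfaced as a typed error (NoSuchNetwork and friends) via a type assertion on *Error, so callers can branch without string matching. A hedged caller-side sketch; the network name is illustrative, the client is set up from the environment for brevity, and errors.As matches these pointer error types:

package main

import (
	"errors"
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	if _, err := client.NetworkInfo("no-such-network"); err != nil {
		var nsn *docker.NoSuchNetwork
		if errors.As(err, &nsn) {
			// The typed error carries the ID that was looked up.
			fmt.Println("network not found:", nsn.ID)
			return
		}
		log.Fatal(err)
	}
}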
func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error { - resp, err := c.do("POST", "/networks/"+id+"/disconnect", doOptions{data: opts}) + resp, err := c.do(http.MethodPost, "/networks/"+id+"/disconnect", doOptions{data: opts}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container} @@ -303,7 +303,7 @@ type PruneNetworksResults struct { // See https://goo.gl/kX0S9h for more details. func (c *Client) PruneNetworks(opts PruneNetworksOptions) (*PruneNetworksResults, error) { path := "/networks/prune?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/plugin.go b/vendor/github.com/fsouza/go-dockerclient/plugin.go index 088790313..9cec41512 100644 --- a/vendor/github.com/fsouza/go-dockerclient/plugin.go +++ b/vendor/github.com/fsouza/go-dockerclient/plugin.go @@ -35,15 +35,26 @@ type InstallPluginOptions struct { // // See https://goo.gl/C4t7Tz for more details. func (c *Client) InstallPlugins(opts InstallPluginOptions) error { + headers, err := headersWithAuth(opts.Auth) + if err != nil { + return err + } + path := "/plugins/pull?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.Plugins, context: opts.Context, + headers: headers, }) if err != nil { return err } - resp.Body.Close() + defer resp.Body.Close() + // PullPlugin streams back the progress of the pull, we must consume the whole body + // otherwise the pull will be canceled on the engine. + if _, err := ioutil.ReadAll(resp.Body); err != nil { + return err + } return nil } @@ -152,7 +163,7 @@ type PluginDetail struct { // // See https://goo.gl/C4t7Tz for more details. func (c *Client) ListPlugins(ctx context.Context) ([]PluginDetail, error) { - resp, err := c.do("GET", "/plugins", doOptions{ + resp, err := c.do(http.MethodGet, "/plugins", doOptions{ context: ctx, }) if err != nil { @@ -179,7 +190,7 @@ type ListFilteredPluginsOptions struct { // See https://goo.gl/rmdmWg for more details. func (c *Client) ListFilteredPlugins(opts ListFilteredPluginsOptions) ([]PluginDetail, error) { path := "/plugins/json?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{ + resp, err := c.do(http.MethodGet, path, doOptions{ context: opts.Context, }) if err != nil { @@ -193,12 +204,41 @@ func (c *Client) ListFilteredPlugins(opts ListFilteredPluginsOptions) ([]PluginD return pluginDetails, nil } -// GetPluginPrivileges returns pulginPrivileges or an error. +// GetPluginPrivileges returns pluginPrivileges or an error. // // See https://goo.gl/C4t7Tz for more details. -func (c *Client) GetPluginPrivileges(name string, ctx context.Context) ([]PluginPrivilege, error) { - resp, err := c.do("GET", "/plugins/privileges?remote="+name, doOptions{ - context: ctx, +//nolint:golint +func (c *Client) GetPluginPrivileges(remote string, ctx context.Context) ([]PluginPrivilege, error) { + return c.GetPluginPrivilegesWithOptions( + GetPluginPrivilegesOptions{ + Remote: remote, + Context: ctx, + }) +} + +// GetPluginPrivilegesOptions specify parameters to the GetPluginPrivilegesWithOptions function. +// +// See https://goo.gl/C4t7Tz for more details. 
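The options struct below lets credentials accompany the privileges query without breaking the old GetPluginPrivileges(remote, ctx) signature. A hedged usage sketch; the plugin reference and credentials are illustrative, and the client comes from the environment for brevity:

package main

import (
	"context"
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	privs, err := client.GetPluginPrivilegesWithOptions(docker.GetPluginPrivilegesOptions{
		Remote:  "vieux/sshfs:latest",                                   // illustrative plugin reference
		Auth:    docker.AuthConfiguration{Username: "u", Password: "p"}, // illustrative credentials
		Context: context.Background(),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range privs {
		fmt.Printf("%+v\n", p)
	}
}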
+type GetPluginPrivilegesOptions struct { + Remote string + Auth AuthConfiguration + Context context.Context +} + +// GetPluginPrivilegesWithOptions returns pluginPrivileges or an error. +// +// See https://goo.gl/C4t7Tz for more details. +//nolint:golint +func (c *Client) GetPluginPrivilegesWithOptions(opts GetPluginPrivilegesOptions) ([]PluginPrivilege, error) { + headers, err := headersWithAuth(opts.Auth) + if err != nil { + return nil, err + } + + path := "/plugins/privileges?" + queryString(opts) + resp, err := c.do(http.MethodGet, path, doOptions{ + context: opts.Context, + headers: headers, }) if err != nil { return nil, err @@ -214,21 +254,18 @@ func (c *Client) GetPluginPrivileges(name string, ctx context.Context) ([]Plugin // InspectPlugins returns a pluginDetail or an error. // // See https://goo.gl/C4t7Tz for more details. +//nolint:golint func (c *Client) InspectPlugins(name string, ctx context.Context) (*PluginDetail, error) { - resp, err := c.do("GET", "/plugins/"+name+"/json", doOptions{ + resp, err := c.do(http.MethodGet, "/plugins/"+name+"/json", doOptions{ context: ctx, }) - if err != nil { - return nil, err - } - defer resp.Body.Close() if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchPlugin{ID: name} } return nil, err } - resp.Body.Close() + defer resp.Body.Close() var pluginDetail PluginDetail if err := json.NewDecoder(resp.Body).Decode(&pluginDetail); err != nil { return nil, err @@ -252,20 +289,26 @@ type RemovePluginOptions struct { // See https://goo.gl/C4t7Tz for more details. func (c *Client) RemovePlugin(opts RemovePluginOptions) (*PluginDetail, error) { path := "/plugins/" + opts.Name + "?" + queryString(opts) - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchPlugin{ID: opts.Name} + } return nil, err } defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchPlugin{ID: opts.Name} - } return nil, err } - resp.Body.Close() + + if len(body) == 0 { + // Seems like newer docker versions won't return the plugindetail after removal + return nil, nil + } + var pluginDetail PluginDetail - if err := json.NewDecoder(resp.Body).Decode(&pluginDetail); err != nil { + if err := json.Unmarshal(body, &pluginDetail); err != nil { return nil, err } return &pluginDetail, nil @@ -287,7 +330,7 @@ type EnablePluginOptions struct { // See https://goo.gl/C4t7Tz for more details. func (c *Client) EnablePlugin(opts EnablePluginOptions) error { path := "/plugins/" + opts.Name + "/enable?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return err } @@ -310,7 +353,7 @@ type DisablePluginOptions struct { // See https://goo.gl/C4t7Tz for more details. func (c *Client) DisablePlugin(opts DisablePluginOptions) error { path := "/plugins/" + opts.Name + "/disable" - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return err } @@ -335,7 +378,7 @@ type CreatePluginOptions struct { // See https://goo.gl/C4t7Tz for more details. 
func (c *Client) CreatePlugin(opts CreatePluginOptions) (string, error) { path := "/plugins/create?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.Path, context: opts.Context, }) @@ -365,7 +408,7 @@ type PushPluginOptions struct { // See https://goo.gl/C4t7Tz for more details. func (c *Client) PushPlugin(opts PushPluginOptions) error { path := "/plugins/" + opts.Name + "/push" - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return err } @@ -389,7 +432,7 @@ type ConfigurePluginOptions struct { // See https://goo.gl/C4t7Tz for more details. func (c *Client) ConfigurePlugin(opts ConfigurePluginOptions) error { path := "/plugins/" + opts.Name + "/set" - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.Envs, context: opts.Context, }) diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm.go b/vendor/github.com/fsouza/go-dockerclient/swarm.go index a257758fc..c1bbce763 100644 --- a/vendor/github.com/fsouza/go-dockerclient/swarm.go +++ b/vendor/github.com/fsouza/go-dockerclient/swarm.go @@ -36,7 +36,7 @@ type InitSwarmOptions struct { // See https://goo.gl/ZWyG1M for more details. func (c *Client) InitSwarm(opts InitSwarmOptions) (string, error) { path := "/swarm/init" - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.InitRequest, forceJSON: true, context: opts.Context, @@ -66,7 +66,7 @@ type JoinSwarmOptions struct { // See https://goo.gl/N59IP1 for more details. func (c *Client) JoinSwarm(opts JoinSwarmOptions) error { path := "/swarm/join" - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.JoinRequest, forceJSON: true, context: opts.Context, @@ -93,7 +93,7 @@ func (c *Client) LeaveSwarm(opts LeaveSwarmOptions) error { params := make(url.Values) params.Set("force", strconv.FormatBool(opts.Force)) path := "/swarm/leave?" + params.Encode() - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ context: opts.Context, }) if err != nil { @@ -123,7 +123,7 @@ func (c *Client) UpdateSwarm(opts UpdateSwarmOptions) error { params.Set("rotateWorkerToken", strconv.FormatBool(opts.RotateWorkerToken)) params.Set("rotateManagerToken", strconv.FormatBool(opts.RotateManagerToken)) path := "/swarm/update?" + params.Encode() - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.Swarm, forceJSON: true, context: opts.Context, @@ -141,7 +141,7 @@ func (c *Client) UpdateSwarm(opts UpdateSwarmOptions) error { // See https://goo.gl/MFwgX9 for more details. func (c *Client) InspectSwarm(ctx context.Context) (swarm.Swarm, error) { response := swarm.Swarm{} - resp, err := c.do("GET", "/swarm", doOptions{ + resp, err := c.do(http.MethodGet, "/swarm", doOptions{ context: ctx, }) if err != nil { diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go b/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go index fb73ab2ef..399aa1dce 100644 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go @@ -46,7 +46,7 @@ func (c *Client) CreateConfig(opts CreateConfigOptions) (*swarm.Config, error) { return nil, err } path := "/configs/create?" 
+ queryString(opts) - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ headers: headers, data: opts.ConfigSpec, forceJSON: true, @@ -76,7 +76,7 @@ type RemoveConfigOptions struct { // See https://goo.gl/Tqrtya for more details. func (c *Client) RemoveConfig(opts RemoveConfigOptions) error { path := "/configs/" + opts.ID - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchConfig{ID: opts.ID} @@ -109,7 +109,7 @@ func (c *Client) UpdateConfig(id string, opts UpdateConfigOptions) error { } params := make(url.Values) params.Set("version", strconv.FormatUint(opts.Version, 10)) - resp, err := c.do("POST", "/configs/"+id+"/update?"+params.Encode(), doOptions{ + resp, err := c.do(http.MethodPost, "/configs/"+id+"/update?"+params.Encode(), doOptions{ headers: headers, data: opts.ConfigSpec, forceJSON: true, @@ -130,7 +130,7 @@ func (c *Client) UpdateConfig(id string, opts UpdateConfigOptions) error { // See https://goo.gl/dHmr75 for more details. func (c *Client) InspectConfig(id string) (*swarm.Config, error) { path := "/configs/" + id - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchConfig{ID: id} @@ -158,7 +158,7 @@ type ListConfigsOptions struct { // See https://goo.gl/DwvNMd for more details. func (c *Client) ListConfigs(opts ListConfigsOptions) ([]swarm.Config, error) { path := "/configs?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_node.go b/vendor/github.com/fsouza/go-dockerclient/swarm_node.go index 095653cd9..c149db287 100644 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_node.go +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_node.go @@ -40,7 +40,7 @@ type ListNodesOptions struct { // See http://goo.gl/3K4GwU for more details. func (c *Client) ListNodes(opts ListNodesOptions) ([]swarm.Node, error) { path := "/nodes?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } @@ -56,7 +56,7 @@ func (c *Client) ListNodes(opts ListNodesOptions) ([]swarm.Node, error) { // // See http://goo.gl/WjkTOk for more details. func (c *Client) InspectNode(id string) (*swarm.Node, error) { - resp, err := c.do("GET", "/nodes/"+id, doOptions{}) + resp, err := c.do(http.MethodGet, "/nodes/"+id, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchNode{ID: id} @@ -87,7 +87,7 @@ func (c *Client) UpdateNode(id string, opts UpdateNodeOptions) error { params := make(url.Values) params.Set("version", strconv.FormatUint(opts.Version, 10)) path := "/nodes/" + id + "/update?" 
+ params.Encode() - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ context: opts.Context, forceJSON: true, data: opts.NodeSpec, @@ -118,7 +118,7 @@ func (c *Client) RemoveNode(opts RemoveNodeOptions) error { params := make(url.Values) params.Set("force", strconv.FormatBool(opts.Force)) path := "/nodes/" + opts.ID + "?" + params.Encode() - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchNode{ID: opts.ID} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go b/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go index 5a3b82ca5..058c4a4af 100644 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go @@ -46,7 +46,7 @@ func (c *Client) CreateSecret(opts CreateSecretOptions) (*swarm.Secret, error) { return nil, err } path := "/secrets/create?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ headers: headers, data: opts.SecretSpec, forceJSON: true, @@ -76,7 +76,7 @@ type RemoveSecretOptions struct { // See https://goo.gl/Tqrtya for more details. func (c *Client) RemoveSecret(opts RemoveSecretOptions) error { path := "/secrets/" + opts.ID - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchSecret{ID: opts.ID} @@ -109,7 +109,7 @@ func (c *Client) UpdateSecret(id string, opts UpdateSecretOptions) error { } params := make(url.Values) params.Set("version", strconv.FormatUint(opts.Version, 10)) - resp, err := c.do("POST", "/secrets/"+id+"/update?"+params.Encode(), doOptions{ + resp, err := c.do(http.MethodPost, "/secrets/"+id+"/update?"+params.Encode(), doOptions{ headers: headers, data: opts.SecretSpec, forceJSON: true, @@ -130,7 +130,7 @@ func (c *Client) UpdateSecret(id string, opts UpdateSecretOptions) error { // See https://goo.gl/dHmr75 for more details. func (c *Client) InspectSecret(id string) (*swarm.Secret, error) { path := "/secrets/" + id - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchSecret{ID: id} @@ -158,7 +158,7 @@ type ListSecretsOptions struct { // See https://goo.gl/DwvNMd for more details. func (c *Client) ListSecrets(opts ListSecretsOptions) ([]swarm.Secret, error) { path := "/secrets?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_service.go b/vendor/github.com/fsouza/go-dockerclient/swarm_service.go index d9c4b2acc..cedbe41e3 100644 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_service.go +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_service.go @@ -46,7 +46,7 @@ func (c *Client) CreateService(opts CreateServiceOptions) (*swarm.Service, error return nil, err } path := "/services/create?" 
+ queryString(opts) - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ headers: headers, data: opts.ServiceSpec, forceJSON: true, @@ -76,7 +76,7 @@ type RemoveServiceOptions struct { // See https://goo.gl/Tqrtya for more details. func (c *Client) RemoveService(opts RemoveServiceOptions) error { path := "/services/" + opts.ID - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchService{ID: opts.ID} @@ -106,7 +106,7 @@ func (c *Client) UpdateService(id string, opts UpdateServiceOptions) error { if err != nil { return err } - resp, err := c.do("POST", "/services/"+id+"/update?"+queryString(opts), doOptions{ + resp, err := c.do(http.MethodPost, "/services/"+id+"/update?"+queryString(opts), doOptions{ headers: headers, data: opts.ServiceSpec, forceJSON: true, @@ -127,7 +127,7 @@ func (c *Client) UpdateService(id string, opts UpdateServiceOptions) error { // See https://goo.gl/dHmr75 for more details. func (c *Client) InspectService(id string) (*swarm.Service, error) { path := "/services/" + id - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchService{ID: id} @@ -155,7 +155,7 @@ type ListServicesOptions struct { // See https://goo.gl/DwvNMd for more details. func (c *Client) ListServices(opts ListServicesOptions) ([]swarm.Service, error) { path := "/services?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } @@ -203,7 +203,7 @@ func (c *Client) GetServiceLogs(opts LogsServiceOptions) error { opts.Tail = "all" } path := "/services/" + opts.Service + "/logs?" + queryString(opts) - return c.stream("GET", path, streamOptions{ + return c.stream(http.MethodGet, path, streamOptions{ setRawTerminal: opts.RawTerminal, stdout: opts.OutputStream, stderr: opts.ErrorStream, diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_task.go b/vendor/github.com/fsouza/go-dockerclient/swarm_task.go index 3b1161ab9..547642f5e 100644 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_task.go +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_task.go @@ -38,7 +38,7 @@ type ListTasksOptions struct { // See http://goo.gl/rByLzw for more details. func (c *Client) ListTasks(opts ListTasksOptions) ([]swarm.Task, error) { path := "/tasks?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } @@ -54,7 +54,7 @@ func (c *Client) ListTasks(opts ListTasksOptions) ([]swarm.Task, error) { // // See http://goo.gl/kyziuq for more details. 
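The swarm Update* hunks above all thread a version number into the query string because swarm updates are compare-and-swap: the version must match what a prior inspect returned, or the engine rejects the update. A hedged sketch using the secret calls shown above (the secret name is illustrative; field names match the opts.SecretSpec and opts.Version selectors in the hunks):

package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	sec, err := client.InspectSecret("my-secret")
	if err != nil {
		log.Fatal(err)
	}
	// Typically the spec is mutated here before re-submitting it.
	err = client.UpdateSecret(sec.ID, docker.UpdateSecretOptions{
		SecretSpec: sec.Spec,
		Version:    sec.Version.Index, // compare-and-swap token from the inspect
	})
	if err != nil {
		log.Fatal(err)
	}
}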
func (c *Client) InspectTask(id string) (*swarm.Task, error) { - resp, err := c.do("GET", "/tasks/"+id, doOptions{}) + resp, err := c.do(http.MethodGet, "/tasks/"+id, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchTask{ID: id} diff --git a/vendor/github.com/fsouza/go-dockerclient/system.go b/vendor/github.com/fsouza/go-dockerclient/system.go index a43dfb5a2..46b9faf00 100644 --- a/vendor/github.com/fsouza/go-dockerclient/system.go +++ b/vendor/github.com/fsouza/go-dockerclient/system.go @@ -3,6 +3,7 @@ package docker import ( "context" "encoding/json" + "net/http" ) // VolumeUsageData represents usage data from the docker system api @@ -59,7 +60,7 @@ type DiskUsageOptions struct { // More Info Here https://dockr.ly/2PNzQyO func (c *Client) DiskUsage(opts DiskUsageOptions) (*DiskUsage, error) { path := "/system/df" - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go index 611da8c9e..9716a7712 100644 --- a/vendor/github.com/fsouza/go-dockerclient/tar.go +++ b/vendor/github.com/fsouza/go-dockerclient/tar.go @@ -13,8 +13,8 @@ import ( "path/filepath" "strings" + "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/fileutils" - "github.com/fsouza/go-dockerclient/internal/archive" ) func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) { diff --git a/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/fsouza/go-dockerclient/tls.go index 07661f3d1..08e7f8ec2 100644 --- a/vendor/github.com/fsouza/go-dockerclient/tls.go +++ b/vendor/github.com/fsouza/go-dockerclient/tls.go @@ -103,7 +103,7 @@ func copyTLSConfig(cfg *tls.Config) *tls.Config { ClientCAs: cfg.ClientCAs, ClientSessionCache: cfg.ClientSessionCache, CurvePreferences: cfg.CurvePreferences, - InsecureSkipVerify: cfg.InsecureSkipVerify, + InsecureSkipVerify: cfg.InsecureSkipVerify, //nolint:gosec MaxVersion: cfg.MaxVersion, MinVersion: cfg.MinVersion, NameToCertificate: cfg.NameToCertificate, diff --git a/vendor/github.com/fsouza/go-dockerclient/volume.go b/vendor/github.com/fsouza/go-dockerclient/volume.go index c8f50469e..c39a273bf 100644 --- a/vendor/github.com/fsouza/go-dockerclient/volume.go +++ b/vendor/github.com/fsouza/go-dockerclient/volume.go @@ -44,7 +44,7 @@ type ListVolumesOptions struct { // // See https://goo.gl/3wgTsd for more details. func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) { - resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{ + resp, err := c.do(http.MethodGet, "/volumes?"+queryString(opts), doOptions{ context: opts.Context, }) if err != nil { @@ -85,7 +85,7 @@ type CreateVolumeOptions struct { // // See https://goo.gl/qEhmEC for more details. func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) { - resp, err := c.do("POST", "/volumes/create", doOptions{ + resp, err := c.do(http.MethodPost, "/volumes/create", doOptions{ data: opts, context: opts.Context, }) @@ -104,7 +104,7 @@ func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) { // // See https://goo.gl/GMjsMc for more details. 
func (c *Client) InspectVolume(name string) (*Volume, error) { - resp, err := c.do("GET", "/volumes/"+name, doOptions{}) + resp, err := c.do(http.MethodGet, "/volumes/"+name, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, ErrNoSuchVolume @@ -142,7 +142,7 @@ type RemoveVolumeOptions struct { // See https://goo.gl/nvd6qj for more details. func (c *Client) RemoveVolumeWithOptions(opts RemoveVolumeOptions) error { path := "/volumes/" + opts.Name - resp, err := c.do("DELETE", path+"?"+queryString(opts), doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path+"?"+queryString(opts), doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok { if e.Status == http.StatusNotFound { @@ -179,7 +179,7 @@ type PruneVolumesResults struct { // See https://goo.gl/f9XDem for more details. func (c *Client) PruneVolumes(opts PruneVolumesOptions) (*PruneVolumesResults, error) { path := "/volumes/prune?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/ijc/Gotty/LICENSE b/vendor/github.com/ijc/Gotty/LICENSE deleted file mode 100644 index 0b71c9736..000000000 --- a/vendor/github.com/ijc/Gotty/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com) -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The views and conclusions contained in the software and documentation are those -of the authors and should not be interpreted as representing official policies, -either expressed or implied, of the FreeBSD Project. diff --git a/vendor/github.com/ijc/Gotty/README b/vendor/github.com/ijc/Gotty/README deleted file mode 100644 index a6b0d9a8f..000000000 --- a/vendor/github.com/ijc/Gotty/README +++ /dev/null @@ -1,5 +0,0 @@ -Gotty is a library written in Go that determines and reads termcap database -files to produce an interface for interacting with the capabilities of a -terminal. -See the godoc documentation or the source code for more information about -function usage. 
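For context on the Gotty dependency whose files are deleted here: jsonmessage used it to look up terminfo capabilities for cursor movement, falling back to raw ANSI escapes, as in cursorUp above. A minimal sketch of that lookup, with the TERM fallback as in DisplayJSONMessagesStream:

package main

import (
	"fmt"
	"os"

	gotty "github.com/ijc/Gotty"
)

func main() {
	term := os.Getenv("TERM")
	if term == "" {
		term = "vt102"
	}
	ti, err := gotty.OpenTermInfo(term)
	if err != nil {
		// Fallback used by the deleted code: a raw ANSI escape.
		fmt.Printf("\x1b[%dA", 3) // move cursor up three lines
		return
	}
	if attr, err := ti.Parse("cuu", 3); err == nil {
		fmt.Print(attr) // terminal-specific "cursor up" sequence
	}
}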
diff --git a/vendor/github.com/ijc/Gotty/TODO b/vendor/github.com/ijc/Gotty/TODO deleted file mode 100644 index 470460531..000000000 --- a/vendor/github.com/ijc/Gotty/TODO +++ /dev/null @@ -1,3 +0,0 @@ -gotty.go:// TODO add more concurrency to name lookup, look for more opportunities. -all:// TODO add more documentation, with function usage in a doc.go file. -all:// TODO add more testing/benchmarking with go test. diff --git a/vendor/github.com/ijc/Gotty/attributes.go b/vendor/github.com/ijc/Gotty/attributes.go deleted file mode 100644 index a4c005fae..000000000 --- a/vendor/github.com/ijc/Gotty/attributes.go +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package gotty - -// Boolean capabilities -var BoolAttr = [...]string{ - "auto_left_margin", "bw", - "auto_right_margin", "am", - "no_esc_ctlc", "xsb", - "ceol_standout_glitch", "xhp", - "eat_newline_glitch", "xenl", - "erase_overstrike", "eo", - "generic_type", "gn", - "hard_copy", "hc", - "has_meta_key", "km", - "has_status_line", "hs", - "insert_null_glitch", "in", - "memory_above", "da", - "memory_below", "db", - "move_insert_mode", "mir", - "move_standout_mode", "msgr", - "over_strike", "os", - "status_line_esc_ok", "eslok", - "dest_tabs_magic_smso", "xt", - "tilde_glitch", "hz", - "transparent_underline", "ul", - "xon_xoff", "nxon", - "needs_xon_xoff", "nxon", - "prtr_silent", "mc5i", - "hard_cursor", "chts", - "non_rev_rmcup", "nrrmc", - "no_pad_char", "npc", - "non_dest_scroll_region", "ndscr", - "can_change", "ccc", - "back_color_erase", "bce", - "hue_lightness_saturation", "hls", - "col_addr_glitch", "xhpa", - "cr_cancels_micro_mode", "crxm", - "has_print_wheel", "daisy", - "row_addr_glitch", "xvpa", - "semi_auto_right_margin", "sam", - "cpi_changes_res", "cpix", - "lpi_changes_res", "lpix", - "backspaces_with_bs", "", - "crt_no_scrolling", "", - "no_correctly_working_cr", "", - "gnu_has_meta_key", "", - "linefeed_is_newline", "", - "has_hardware_tabs", "", - "return_does_clr_eol", "", -} - -// Numerical capabilities -var NumAttr = [...]string{ - "columns", "cols", - "init_tabs", "it", - "lines", "lines", - "lines_of_memory", "lm", - "magic_cookie_glitch", "xmc", - "padding_baud_rate", "pb", - "virtual_terminal", "vt", - "width_status_line", "wsl", - "num_labels", "nlab", - "label_height", "lh", - "label_width", "lw", - "max_attributes", "ma", - "maximum_windows", "wnum", - "max_colors", "colors", - "max_pairs", "pairs", - "no_color_video", "ncv", - "buffer_capacity", "bufsz", - "dot_vert_spacing", "spinv", - "dot_horz_spacing", "spinh", - "max_micro_address", "maddr", - "max_micro_jump", "mjump", - "micro_col_size", "mcs", - "micro_line_size", "mls", - "number_of_pins", "npins", - "output_res_char", "orc", - "output_res_line", "orl", - "output_res_horz_inch", "orhi", - "output_res_vert_inch", "orvi", - "print_rate", "cps", - "wide_char_size", "widcs", - "buttons", "btns", - "bit_image_entwining", "bitwin", - "bit_image_type", "bitype", - "magic_cookie_glitch_ul", "", - "carriage_return_delay", "", - "new_line_delay", "", - "backspace_delay", "", - "horizontal_tab_delay", "", - "number_of_function_keys", "", -} - -// String capabilities -var StrAttr = [...]string{ - "back_tab", "cbt", - "bell", "bel", - "carriage_return", "cr", - "change_scroll_region", "csr", - "clear_all_tabs", "tbc", - "clear_screen", "clear", - "clr_eol", "el", - "clr_eos", "ed", - "column_address", "hpa", - 
"command_character", "cmdch", - "cursor_address", "cup", - "cursor_down", "cud1", - "cursor_home", "home", - "cursor_invisible", "civis", - "cursor_left", "cub1", - "cursor_mem_address", "mrcup", - "cursor_normal", "cnorm", - "cursor_right", "cuf1", - "cursor_to_ll", "ll", - "cursor_up", "cuu1", - "cursor_visible", "cvvis", - "delete_character", "dch1", - "delete_line", "dl1", - "dis_status_line", "dsl", - "down_half_line", "hd", - "enter_alt_charset_mode", "smacs", - "enter_blink_mode", "blink", - "enter_bold_mode", "bold", - "enter_ca_mode", "smcup", - "enter_delete_mode", "smdc", - "enter_dim_mode", "dim", - "enter_insert_mode", "smir", - "enter_secure_mode", "invis", - "enter_protected_mode", "prot", - "enter_reverse_mode", "rev", - "enter_standout_mode", "smso", - "enter_underline_mode", "smul", - "erase_chars", "ech", - "exit_alt_charset_mode", "rmacs", - "exit_attribute_mode", "sgr0", - "exit_ca_mode", "rmcup", - "exit_delete_mode", "rmdc", - "exit_insert_mode", "rmir", - "exit_standout_mode", "rmso", - "exit_underline_mode", "rmul", - "flash_screen", "flash", - "form_feed", "ff", - "from_status_line", "fsl", - "init_1string", "is1", - "init_2string", "is2", - "init_3string", "is3", - "init_file", "if", - "insert_character", "ich1", - "insert_line", "il1", - "insert_padding", "ip", - "key_backspace", "kbs", - "key_catab", "ktbc", - "key_clear", "kclr", - "key_ctab", "kctab", - "key_dc", "kdch1", - "key_dl", "kdl1", - "key_down", "kcud1", - "key_eic", "krmir", - "key_eol", "kel", - "key_eos", "ked", - "key_f0", "kf0", - "key_f1", "kf1", - "key_f10", "kf10", - "key_f2", "kf2", - "key_f3", "kf3", - "key_f4", "kf4", - "key_f5", "kf5", - "key_f6", "kf6", - "key_f7", "kf7", - "key_f8", "kf8", - "key_f9", "kf9", - "key_home", "khome", - "key_ic", "kich1", - "key_il", "kil1", - "key_left", "kcub1", - "key_ll", "kll", - "key_npage", "knp", - "key_ppage", "kpp", - "key_right", "kcuf1", - "key_sf", "kind", - "key_sr", "kri", - "key_stab", "khts", - "key_up", "kcuu1", - "keypad_local", "rmkx", - "keypad_xmit", "smkx", - "lab_f0", "lf0", - "lab_f1", "lf1", - "lab_f10", "lf10", - "lab_f2", "lf2", - "lab_f3", "lf3", - "lab_f4", "lf4", - "lab_f5", "lf5", - "lab_f6", "lf6", - "lab_f7", "lf7", - "lab_f8", "lf8", - "lab_f9", "lf9", - "meta_off", "rmm", - "meta_on", "smm", - "newline", "_glitch", - "pad_char", "npc", - "parm_dch", "dch", - "parm_delete_line", "dl", - "parm_down_cursor", "cud", - "parm_ich", "ich", - "parm_index", "indn", - "parm_insert_line", "il", - "parm_left_cursor", "cub", - "parm_right_cursor", "cuf", - "parm_rindex", "rin", - "parm_up_cursor", "cuu", - "pkey_key", "pfkey", - "pkey_local", "pfloc", - "pkey_xmit", "pfx", - "print_screen", "mc0", - "prtr_off", "mc4", - "prtr_on", "mc5", - "repeat_char", "rep", - "reset_1string", "rs1", - "reset_2string", "rs2", - "reset_3string", "rs3", - "reset_file", "rf", - "restore_cursor", "rc", - "row_address", "mvpa", - "save_cursor", "row_address", - "scroll_forward", "ind", - "scroll_reverse", "ri", - "set_attributes", "sgr", - "set_tab", "hts", - "set_window", "wind", - "tab", "s_magic_smso", - "to_status_line", "tsl", - "underline_char", "uc", - "up_half_line", "hu", - "init_prog", "iprog", - "key_a1", "ka1", - "key_a3", "ka3", - "key_b2", "kb2", - "key_c1", "kc1", - "key_c3", "kc3", - "prtr_non", "mc5p", - "char_padding", "rmp", - "acs_chars", "acsc", - "plab_norm", "pln", - "key_btab", "kcbt", - "enter_xon_mode", "smxon", - "exit_xon_mode", "rmxon", - "enter_am_mode", "smam", - "exit_am_mode", "rmam", - "xon_character", "xonc", - 
"xoff_character", "xoffc", - "ena_acs", "enacs", - "label_on", "smln", - "label_off", "rmln", - "key_beg", "kbeg", - "key_cancel", "kcan", - "key_close", "kclo", - "key_command", "kcmd", - "key_copy", "kcpy", - "key_create", "kcrt", - "key_end", "kend", - "key_enter", "kent", - "key_exit", "kext", - "key_find", "kfnd", - "key_help", "khlp", - "key_mark", "kmrk", - "key_message", "kmsg", - "key_move", "kmov", - "key_next", "knxt", - "key_open", "kopn", - "key_options", "kopt", - "key_previous", "kprv", - "key_print", "kprt", - "key_redo", "krdo", - "key_reference", "kref", - "key_refresh", "krfr", - "key_replace", "krpl", - "key_restart", "krst", - "key_resume", "kres", - "key_save", "ksav", - "key_suspend", "kspd", - "key_undo", "kund", - "key_sbeg", "kBEG", - "key_scancel", "kCAN", - "key_scommand", "kCMD", - "key_scopy", "kCPY", - "key_screate", "kCRT", - "key_sdc", "kDC", - "key_sdl", "kDL", - "key_select", "kslt", - "key_send", "kEND", - "key_seol", "kEOL", - "key_sexit", "kEXT", - "key_sfind", "kFND", - "key_shelp", "kHLP", - "key_shome", "kHOM", - "key_sic", "kIC", - "key_sleft", "kLFT", - "key_smessage", "kMSG", - "key_smove", "kMOV", - "key_snext", "kNXT", - "key_soptions", "kOPT", - "key_sprevious", "kPRV", - "key_sprint", "kPRT", - "key_sredo", "kRDO", - "key_sreplace", "kRPL", - "key_sright", "kRIT", - "key_srsume", "kRES", - "key_ssave", "kSAV", - "key_ssuspend", "kSPD", - "key_sundo", "kUND", - "req_for_input", "rfi", - "key_f11", "kf11", - "key_f12", "kf12", - "key_f13", "kf13", - "key_f14", "kf14", - "key_f15", "kf15", - "key_f16", "kf16", - "key_f17", "kf17", - "key_f18", "kf18", - "key_f19", "kf19", - "key_f20", "kf20", - "key_f21", "kf21", - "key_f22", "kf22", - "key_f23", "kf23", - "key_f24", "kf24", - "key_f25", "kf25", - "key_f26", "kf26", - "key_f27", "kf27", - "key_f28", "kf28", - "key_f29", "kf29", - "key_f30", "kf30", - "key_f31", "kf31", - "key_f32", "kf32", - "key_f33", "kf33", - "key_f34", "kf34", - "key_f35", "kf35", - "key_f36", "kf36", - "key_f37", "kf37", - "key_f38", "kf38", - "key_f39", "kf39", - "key_f40", "kf40", - "key_f41", "kf41", - "key_f42", "kf42", - "key_f43", "kf43", - "key_f44", "kf44", - "key_f45", "kf45", - "key_f46", "kf46", - "key_f47", "kf47", - "key_f48", "kf48", - "key_f49", "kf49", - "key_f50", "kf50", - "key_f51", "kf51", - "key_f52", "kf52", - "key_f53", "kf53", - "key_f54", "kf54", - "key_f55", "kf55", - "key_f56", "kf56", - "key_f57", "kf57", - "key_f58", "kf58", - "key_f59", "kf59", - "key_f60", "kf60", - "key_f61", "kf61", - "key_f62", "kf62", - "key_f63", "kf63", - "clr_bol", "el1", - "clear_margins", "mgc", - "set_left_margin", "smgl", - "set_right_margin", "smgr", - "label_format", "fln", - "set_clock", "sclk", - "display_clock", "dclk", - "remove_clock", "rmclk", - "create_window", "cwin", - "goto_window", "wingo", - "hangup", "hup", - "dial_phone", "dial", - "quick_dial", "qdial", - "tone", "tone", - "pulse", "pulse", - "flash_hook", "hook", - "fixed_pause", "pause", - "wait_tone", "wait", - "user0", "u0", - "user1", "u1", - "user2", "u2", - "user3", "u3", - "user4", "u4", - "user5", "u5", - "user6", "u6", - "user7", "u7", - "user8", "u8", - "user9", "u9", - "orig_pair", "op", - "orig_colors", "oc", - "initialize_color", "initc", - "initialize_pair", "initp", - "set_color_pair", "scp", - "set_foreground", "setf", - "set_background", "setb", - "change_char_pitch", "cpi", - "change_line_pitch", "lpi", - "change_res_horz", "chr", - "change_res_vert", "cvr", - "define_char", "defc", - "enter_doublewide_mode", "swidm", - 
"enter_draft_quality", "sdrfq", - "enter_italics_mode", "sitm", - "enter_leftward_mode", "slm", - "enter_micro_mode", "smicm", - "enter_near_letter_quality", "snlq", - "enter_normal_quality", "snrmq", - "enter_shadow_mode", "sshm", - "enter_subscript_mode", "ssubm", - "enter_superscript_mode", "ssupm", - "enter_upward_mode", "sum", - "exit_doublewide_mode", "rwidm", - "exit_italics_mode", "ritm", - "exit_leftward_mode", "rlm", - "exit_micro_mode", "rmicm", - "exit_shadow_mode", "rshm", - "exit_subscript_mode", "rsubm", - "exit_superscript_mode", "rsupm", - "exit_upward_mode", "rum", - "micro_column_address", "mhpa", - "micro_down", "mcud1", - "micro_left", "mcub1", - "micro_right", "mcuf1", - "micro_row_address", "mvpa", - "micro_up", "mcuu1", - "order_of_pins", "porder", - "parm_down_micro", "mcud", - "parm_left_micro", "mcub", - "parm_right_micro", "mcuf", - "parm_up_micro", "mcuu", - "select_char_set", "scs", - "set_bottom_margin", "smgb", - "set_bottom_margin_parm", "smgbp", - "set_left_margin_parm", "smglp", - "set_right_margin_parm", "smgrp", - "set_top_margin", "smgt", - "set_top_margin_parm", "smgtp", - "start_bit_image", "sbim", - "start_char_set_def", "scsd", - "stop_bit_image", "rbim", - "stop_char_set_def", "rcsd", - "subscript_characters", "subcs", - "superscript_characters", "supcs", - "these_cause_cr", "docr", - "zero_motion", "zerom", - "char_set_names", "csnm", - "key_mouse", "kmous", - "mouse_info", "minfo", - "req_mouse_pos", "reqmp", - "get_mouse", "getm", - "set_a_foreground", "setaf", - "set_a_background", "setab", - "pkey_plab", "pfxl", - "device_type", "devt", - "code_set_init", "csin", - "set0_des_seq", "s0ds", - "set1_des_seq", "s1ds", - "set2_des_seq", "s2ds", - "set3_des_seq", "s3ds", - "set_lr_margin", "smglr", - "set_tb_margin", "smgtb", - "bit_image_repeat", "birep", - "bit_image_newline", "binel", - "bit_image_carriage_return", "bicr", - "color_names", "colornm", - "define_bit_image_region", "defbi", - "end_bit_image_region", "endbi", - "set_color_band", "setcolor", - "set_page_length", "slines", - "display_pc_char", "dispc", - "enter_pc_charset_mode", "smpch", - "exit_pc_charset_mode", "rmpch", - "enter_scancode_mode", "smsc", - "exit_scancode_mode", "rmsc", - "pc_term_options", "pctrm", - "scancode_escape", "scesc", - "alt_scancode_esc", "scesa", - "enter_horizontal_hl_mode", "ehhlm", - "enter_left_hl_mode", "elhlm", - "enter_low_hl_mode", "elohlm", - "enter_right_hl_mode", "erhlm", - "enter_top_hl_mode", "ethlm", - "enter_vertical_hl_mode", "evhlm", - "set_a_attributes", "sgr1", - "set_pglen_inch", "slength", - "termcap_init2", "", - "termcap_reset", "", - "linefeed_if_not_lf", "", - "backspace_if_not_bs", "", - "other_non_function_keys", "", - "arrow_key_map", "", - "acs_ulcorner", "", - "acs_llcorner", "", - "acs_urcorner", "", - "acs_lrcorner", "", - "acs_ltee", "", - "acs_rtee", "", - "acs_btee", "", - "acs_ttee", "", - "acs_hline", "", - "acs_vline", "", - "acs_plus", "", - "memory_lock", "", - "memory_unlock", "", - "box_chars_1", "", -} diff --git a/vendor/github.com/ijc/Gotty/gotty.go b/vendor/github.com/ijc/Gotty/gotty.go deleted file mode 100644 index c329778a1..000000000 --- a/vendor/github.com/ijc/Gotty/gotty.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// Gotty is a Go-package for reading and parsing the terminfo database -package gotty - -// TODO add more concurrency to name lookup, look for more opportunities. - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "os" - "path" - "reflect" - "strings" - "sync" -) - -// Open a terminfo file by the name given and construct a TermInfo object. -// If something went wrong reading the terminfo database file, an error is -// returned. -func OpenTermInfo(termName string) (*TermInfo, error) { - if len(termName) == 0 { - return nil, errors.New("No termname given") - } - // Find the environment variables - if termloc := os.Getenv("TERMINFO"); len(termloc) > 0 { - return readTermInfo(path.Join(termloc, string(termName[0]), termName)) - } else { - // Search like ncurses - locations := []string{} - if h := os.Getenv("HOME"); len(h) > 0 { - locations = append(locations, path.Join(h, ".terminfo")) - } - locations = append(locations, - "/etc/terminfo/", - "/lib/terminfo/", - "/usr/share/terminfo/") - for _, str := range locations { - term, err := readTermInfo(path.Join(str, string(termName[0]), termName)) - if err == nil { - return term, nil - } - } - return nil, errors.New("No terminfo file(-location) found") - } -} - -// Open a terminfo file from the environment variable containing the current -// terminal name and construct a TermInfo object. If something went wrong -// reading the terminfo database file, an error is returned. -func OpenTermInfoEnv() (*TermInfo, error) { - termenv := os.Getenv("TERM") - return OpenTermInfo(termenv) -} - -// Return an attribute by the name attr provided. If none can be found, -// an error is returned. -func (term *TermInfo) GetAttribute(attr string) (stacker, error) { - // Channel to store the main value in. - var value stacker - // Add a blocking WaitGroup - var block sync.WaitGroup - // Keep track of variable being written. - written := false - // Function to put into goroutine. - f := func(ats interface{}) { - var ok bool - var v stacker - // Switch on type of map to use and assign value to it. - switch reflect.TypeOf(ats).Elem().Kind() { - case reflect.Bool: - v, ok = ats.(map[string]bool)[attr] - case reflect.Int16: - v, ok = ats.(map[string]int16)[attr] - case reflect.String: - v, ok = ats.(map[string]string)[attr] - } - // If ok, a value is found, so we can write. - if ok { - value = v - written = true - } - // Goroutine is done - block.Done() - } - block.Add(3) - // Go for all 3 attribute lists. - go f(term.boolAttributes) - go f(term.numAttributes) - go f(term.strAttributes) - // Wait until every goroutine is done. - block.Wait() - // If a value has been written, return it. - if written { - return value, nil - } - // Otherwise, error. - return nil, fmt.Errorf("Erorr finding attribute") -} - -// Return an attribute by the name attr provided. If none can be found, -// an error is returned. A name is first converted to its termcap value. -func (term *TermInfo) GetAttributeName(name string) (stacker, error) { - tc := GetTermcapName(name) - return term.GetAttribute(tc) -} - -// A utility function that finds and returns the termcap equivalent of a -// variable name. 
-func GetTermcapName(name string) string { - // Termcap name - var tc string - // Blocking group - var wait sync.WaitGroup - // Function to put into a goroutine - f := func(attrs []string) { - // Find the string corresponding to the name - for i, s := range attrs { - if s == name { - tc = attrs[i+1] - } - } - // Goroutine is finished - wait.Done() - } - wait.Add(3) - // Go for all 3 attribute lists - go f(BoolAttr[:]) - go f(NumAttr[:]) - go f(StrAttr[:]) - // Wait until every goroutine is done - wait.Wait() - // Return the termcap name - return tc -} - -// This function takes a path to a terminfo file and reads it in binary -// form to construct the actual TermInfo file. -func readTermInfo(path string) (*TermInfo, error) { - // Open the terminfo file - file, err := os.Open(path) - defer file.Close() - if err != nil { - return nil, err - } - - // magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize - // Header is composed of the magic 0432 octal number, size of the name - // section, size of the boolean section, the amount of number values, - // the number of offsets of strings, and the size of the string section. - var header [6]int16 - // Byte array is used to read in byte values - var byteArray []byte - // Short array is used to read in short values - var shArray []int16 - // TermInfo object to store values - var term TermInfo - - // Read in the header - err = binary.Read(file, binary.LittleEndian, &header) - if err != nil { - return nil, err - } - // If magic number isn't there or isn't correct, we have the wrong filetype - if header[0] != 0432 { - return nil, errors.New(fmt.Sprintf("Wrong filetype")) - } - - // Read in the names - byteArray = make([]byte, header[1]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.Names = strings.Split(string(byteArray), "|") - - // Read in the booleans - byteArray = make([]byte, header[2]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.boolAttributes = make(map[string]bool) - for i, b := range byteArray { - if b == 1 { - term.boolAttributes[BoolAttr[i*2+1]] = true - } - } - // If the number of bytes read is not even, a byte for alignment is added - // We know the header is an even number of bytes so only need to check the - // total of the names and booleans. 
- if (header[1]+header[2])%2 != 0 { - err = binary.Read(file, binary.LittleEndian, make([]byte, 1)) - if err != nil { - return nil, err - } - } - - // Read in shorts - shArray = make([]int16, header[3]) - err = binary.Read(file, binary.LittleEndian, &shArray) - if err != nil { - return nil, err - } - term.numAttributes = make(map[string]int16) - for i, n := range shArray { - if n != 0377 && n > -1 { - term.numAttributes[NumAttr[i*2+1]] = n - } - } - - // Read the offsets into the short array - shArray = make([]int16, header[4]) - err = binary.Read(file, binary.LittleEndian, &shArray) - if err != nil { - return nil, err - } - // Read the actual strings in the byte array - byteArray = make([]byte, header[5]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.strAttributes = make(map[string]string) - // We get an offset, and then iterate until the string is null-terminated - for i, offset := range shArray { - if offset > -1 { - if int(offset) >= len(byteArray) { - return nil, errors.New("array out of bounds reading string section") - } - r := bytes.IndexByte(byteArray[offset:], 0) - if r == -1 { - return nil, errors.New("missing nul byte reading string section") - } - r += int(offset) - term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r]) - } - } - return &term, nil -} diff --git a/vendor/github.com/ijc/Gotty/parser.go b/vendor/github.com/ijc/Gotty/parser.go deleted file mode 100644 index a9d5d23c5..000000000 --- a/vendor/github.com/ijc/Gotty/parser.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package gotty - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -var exp = [...]string{ - "%%", - "%c", - "%s", - "%p(\\d)", - "%P([A-z])", - "%g([A-z])", - "%'(.)'", - "%{([0-9]+)}", - "%l", - "%\\+|%-|%\\*|%/|%m", - "%&|%\\||%\\^", - "%=|%>|%<", - "%A|%O", - "%!|%~", - "%i", - "%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]", - "%\\?(.*?);", -} - -var regex *regexp.Regexp -var staticVar map[byte]stacker - -// Parses the attribute that is received with name attr and parameters params. -func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) { - // Get the attribute name first. - iface, err := term.GetAttribute(attr) - str, ok := iface.(string) - if err != nil { - return "", err - } - if !ok { - return str, errors.New("Only string capabilities can be parsed.") - } - // Construct the hidden parser struct so we can use a recursive stack based - // parser. - ps := &parser{} - // Dynamic variables only exist in this context. - ps.dynamicVar = make(map[byte]stacker, 26) - ps.parameters = make([]stacker, len(params)) - // Convert the parameters to insert them into the parser struct. - for i, x := range params { - ps.parameters[i] = x - } - // Recursively walk and return. - result, err := ps.walk(str) - return result, err -} - -// Parses the attribute that is received with name attr and parameters params. -// Only works on full name of a capability that is given, which it uses to -// search for the termcap name. -func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) { - tc := GetTermcapName(attr) - return term.Parse(tc, params) -} - -// Identify each token in a stack based manner and do the actual parsing. 
-func (ps *parser) walk(attr string) (string, error) { - // We use a buffer to get the modified string. - var buf bytes.Buffer - // Next, find and identify all tokens by their indices and strings. - tokens := regex.FindAllStringSubmatch(attr, -1) - if len(tokens) == 0 { - return attr, nil - } - indices := regex.FindAllStringIndex(attr, -1) - q := 0 // q counts the matches of one token - // Iterate through the string per character. - for i := 0; i < len(attr); i++ { - // If the current position is an identified token, execute the following - // steps. - if q < len(indices) && i >= indices[q][0] && i < indices[q][1] { - // Switch on token. - switch { - case tokens[q][0][:2] == "%%": - // Literal percentage character. - buf.WriteByte('%') - case tokens[q][0][:2] == "%c": - // Pop a character. - c, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - buf.WriteByte(c.(byte)) - case tokens[q][0][:2] == "%s": - // Pop a string. - str, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - if _, ok := str.(string); !ok { - return buf.String(), errors.New("Stack head is not a string") - } - buf.WriteString(str.(string)) - case tokens[q][0][:2] == "%p": - // Push a parameter on the stack. - index, err := strconv.ParseInt(tokens[q][1], 10, 8) - index-- - if err != nil { - return buf.String(), err - } - if int(index) >= len(ps.parameters) { - return buf.String(), errors.New("Parameters index out of bound") - } - ps.st.push(ps.parameters[index]) - case tokens[q][0][:2] == "%P": - // Pop a variable from the stack as a dynamic or static variable. - val, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - index := tokens[q][2] - if len(index) > 1 { - errorStr := fmt.Sprintf("%s is not a valid dynamic variables index", - index) - return buf.String(), errors.New(errorStr) - } - // Specify either dynamic or static. - if index[0] >= 'a' && index[0] <= 'z' { - ps.dynamicVar[index[0]] = val - } else if index[0] >= 'A' && index[0] <= 'Z' { - staticVar[index[0]] = val - } - case tokens[q][0][:2] == "%g": - // Push a variable from the stack as a dynamic or static variable. - index := tokens[q][3] - if len(index) > 1 { - errorStr := fmt.Sprintf("%s is not a valid static variables index", - index) - return buf.String(), errors.New(errorStr) - } - var val stacker - if index[0] >= 'a' && index[0] <= 'z' { - val = ps.dynamicVar[index[0]] - } else if index[0] >= 'A' && index[0] <= 'Z' { - val = staticVar[index[0]] - } - ps.st.push(val) - case tokens[q][0][:2] == "%'": - // Push a character constant. - con := tokens[q][4] - if len(con) > 1 { - errorStr := fmt.Sprintf("%s is not a valid character constant", con) - return buf.String(), errors.New(errorStr) - } - ps.st.push(con[0]) - case tokens[q][0][:2] == "%{": - // Push an integer constant. - con, err := strconv.ParseInt(tokens[q][5], 10, 32) - if err != nil { - return buf.String(), err - } - ps.st.push(con) - case tokens[q][0][:2] == "%l": - // Push the length of the string that is popped from the stack. - popStr, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - if _, ok := popStr.(string); !ok { - errStr := fmt.Sprintf("Stack head is not a string") - return buf.String(), errors.New(errStr) - } - ps.st.push(len(popStr.(string))) - case tokens[q][0][:2] == "%?": - // If-then-else construct. First, the whole string is identified and - // then inside this substring, we can specify which parts to switch on. 
- ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);") - ifTokens := ifReg.FindStringSubmatch(tokens[q][0]) - var ( - ifStr string - err error - ) - // Parse the if-part to determine if-else. - if len(ifTokens[1]) > 0 { - ifStr, err = ps.walk(ifTokens[1]) - } else { // else - ifStr, err = ps.walk(ifTokens[4]) - } - // Return any errors - if err != nil { - return buf.String(), err - } else if len(ifStr) > 0 { - // Self-defined limitation, not sure if this is correct, but didn't - // seem like it. - return buf.String(), errors.New("If-clause cannot print statements") - } - var thenStr string - // Pop the first value that is set by parsing the if-clause. - choose, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - // Switch to if or else. - if choose.(int) == 0 && len(ifTokens[1]) > 0 { - thenStr, err = ps.walk(ifTokens[3]) - } else if choose.(int) != 0 { - if len(ifTokens[1]) > 0 { - thenStr, err = ps.walk(ifTokens[2]) - } else { - thenStr, err = ps.walk(ifTokens[5]) - } - } - if err != nil { - return buf.String(), err - } - buf.WriteString(thenStr) - case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits. - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'x': - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'X': - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 's': - token := tokens[q][0] - // Remove the : that comes before a flag. - if token[1] == ':' { - token = token[:1] + token[2:] - } - digit, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - // The rest is determined like the normal formatted prints. - digitStr := fmt.Sprintf(token, digit.(int)) - buf.WriteString(digitStr) - case tokens[q][0][:2] == "%i": - // Increment the parameters by one. - if len(ps.parameters) < 2 { - return buf.String(), errors.New("Not enough parameters to increment.") - } - val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int) - val1++ - val2++ - ps.parameters[0], ps.parameters[1] = val1, val2 - default: - // The rest of the tokens is a special case, where two values are - // popped and then operated on by the token that comes after them. - op1, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - op2, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - var result stacker - switch tokens[q][0][:2] { - case "%+": - // Addition - result = op2.(int) + op1.(int) - case "%-": - // Subtraction - result = op2.(int) - op1.(int) - case "%*": - // Multiplication - result = op2.(int) * op1.(int) - case "%/": - // Division - result = op2.(int) / op1.(int) - case "%m": - // Modulo - result = op2.(int) % op1.(int) - case "%&": - // Bitwise AND - result = op2.(int) & op1.(int) - case "%|": - // Bitwise OR - result = op2.(int) | op1.(int) - case "%^": - // Bitwise XOR - result = op2.(int) ^ op1.(int) - case "%=": - // Equals - result = op2 == op1 - case "%>": - // Greater-than - result = op2.(int) > op1.(int) - case "%<": - // Lesser-than - result = op2.(int) < op1.(int) - case "%A": - // Logical AND - result = op2.(bool) && op1.(bool) - case "%O": - // Logical OR - result = op2.(bool) || op1.(bool) - case "%!": - // Logical complement - result = !op1.(bool) - case "%~": - // Bitwise complement - result = ^(op1.(int)) - } - ps.st.push(result) - } - - i = indices[q][1] - 1 - q++ - } else { - // We are not "inside" a token, so just skip until the end or the next - // token, and add all characters to the buffer. 
- j := i - if q != len(indices) { - for !(j >= indices[q][0] && j < indices[q][1]) { - j++ - } - } else { - j = len(attr) - } - buf.WriteString(string(attr[i:j])) - i = j - } - } - // Return the buffer as a string. - return buf.String(), nil -} - -// Push a stacker-value onto the stack. -func (st *stack) push(s stacker) { - *st = append(*st, s) -} - -// Pop a stacker-value from the stack. -func (st *stack) pop() (stacker, error) { - if len(*st) == 0 { - return nil, errors.New("Stack is empty.") - } - newStack := make(stack, len(*st)-1) - val := (*st)[len(*st)-1] - copy(newStack, (*st)[:len(*st)-1]) - *st = newStack - return val, nil -} - -// Initialize regexes and the static vars (that don't get changed between -// calls. -func init() { - // Initialize the main regex. - expStr := strings.Join(exp[:], "|") - regex, _ = regexp.Compile(expStr) - // Initialize the static variables. - staticVar = make(map[byte]stacker, 26) -} diff --git a/vendor/github.com/ijc/Gotty/types.go b/vendor/github.com/ijc/Gotty/types.go deleted file mode 100644 index 9bcc65e9b..000000000 --- a/vendor/github.com/ijc/Gotty/types.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package gotty - -type TermInfo struct { - boolAttributes map[string]bool - numAttributes map[string]int16 - strAttributes map[string]string - // The various names of the TermInfo file. - Names []string -} - -type stacker interface { -} -type stack []stacker - -type parser struct { - st stack - parameters []stacker - dynamicVar map[byte]stacker -} diff --git a/vendor/github.com/morikuni/aec/LICENSE b/vendor/github.com/morikuni/aec/LICENSE new file mode 100644 index 000000000..1c2640164 --- /dev/null +++ b/vendor/github.com/morikuni/aec/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Taihei Morikuni + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/morikuni/aec/README.md b/vendor/github.com/morikuni/aec/README.md new file mode 100644 index 000000000..3cbc4343e --- /dev/null +++ b/vendor/github.com/morikuni/aec/README.md @@ -0,0 +1,178 @@ +# aec + +[![GoDoc](https://godoc.org/github.com/morikuni/aec?status.svg)](https://godoc.org/github.com/morikuni/aec) + +Go wrapper for ANSI escape code. + +## Install + +```bash +go get github.com/morikuni/aec +``` + +## Features + +ANSI escape codes depend on terminal environment. +Some of these features may not work. 
+Check supported Font-Style/Font-Color features with [checkansi](./checkansi). + +[Wikipedia](https://en.wikipedia.org/wiki/ANSI_escape_code) for more detail. + +### Cursor + +- `Up(n)` +- `Down(n)` +- `Right(n)` +- `Left(n)` +- `NextLine(n)` +- `PreviousLine(n)` +- `Column(col)` +- `Position(row, col)` +- `Save` +- `Restore` +- `Hide` +- `Show` +- `Report` + +### Erase + +- `EraseDisplay(mode)` +- `EraseLine(mode)` + +### Scroll + +- `ScrollUp(n)` +- `ScrollDown(n)` + +### Font Style + +- `Bold` +- `Faint` +- `Italic` +- `Underline` +- `BlinkSlow` +- `BlinkRapid` +- `Inverse` +- `Conceal` +- `CrossOut` +- `Frame` +- `Encircle` +- `Overline` + +### Font Color + +Foreground color. + +- `DefaultF` +- `BlackF` +- `RedF` +- `GreenF` +- `YellowF` +- `BlueF` +- `MagentaF` +- `CyanF` +- `WhiteF` +- `LightBlackF` +- `LightRedF` +- `LightGreenF` +- `LightYellowF` +- `LightBlueF` +- `LightMagentaF` +- `LightCyanF` +- `LightWhiteF` +- `Color3BitF(color)` +- `Color8BitF(color)` +- `FullColorF(r, g, b)` + +Background color. + +- `DefaultB` +- `BlackB` +- `RedB` +- `GreenB` +- `YellowB` +- `BlueB` +- `MagentaB` +- `CyanB` +- `WhiteB` +- `LightBlackB` +- `LightRedB` +- `LightGreenB` +- `LightYellowB` +- `LightBlueB` +- `LightMagentaB` +- `LightCyanB` +- `LightWhiteB` +- `Color3BitB(color)` +- `Color8BitB(color)` +- `FullColorB(r, g, b)` + +### Color Converter + +24bit RGB color to ANSI color. + +- `NewRGB3Bit(r, g, b)` +- `NewRGB8Bit(r, g, b)` + +### Builder + +To mix these features. + +```go +custom := aec.EmptyBuilder.Right(2).RGB8BitF(128, 255, 64).RedB().ANSI +custom.Apply("Hello World") +``` + +## Usage + +1. Create ANSI by `aec.XXX().With(aec.YYY())` or `aec.EmptyBuilder.XXX().YYY().ANSI` +2. Print ANSI by `fmt.Print(ansi, "some string", aec.Reset)` or `fmt.Print(ansi.Apply("some string"))` + +`aec.Reset` should be added when using font style or font color features. + +## Example + +Simple progressbar. + +![sample](./sample.gif) + +```go +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/morikuni/aec" +) + +func main() { + const n = 20 + builder := aec.EmptyBuilder + + up2 := aec.Up(2) + col := aec.Column(n + 2) + bar := aec.Color8BitF(aec.NewRGB8Bit(64, 255, 64)) + label := builder.LightRedF().Underline().With(col).Right(1).ANSI + + // for up2 + fmt.Println() + fmt.Println() + + for i := 0; i <= n; i++ { + fmt.Print(up2) + fmt.Println(label.Apply(fmt.Sprint(i, "/", n))) + fmt.Print("[") + fmt.Print(bar.Apply(strings.Repeat("=", i))) + fmt.Println(col.Apply("]")) + time.Sleep(100 * time.Millisecond) + } +} +``` + +## License + +[MIT](./LICENSE) + + diff --git a/vendor/github.com/morikuni/aec/aec.go b/vendor/github.com/morikuni/aec/aec.go new file mode 100644 index 000000000..566be6eb1 --- /dev/null +++ b/vendor/github.com/morikuni/aec/aec.go @@ -0,0 +1,137 @@ +package aec + +import "fmt" + +// EraseMode is listed in a variable EraseModes. +type EraseMode uint + +var ( + // EraseModes is a list of EraseMode. + EraseModes struct { + // All erase all. + All EraseMode + + // Head erase to head. + Head EraseMode + + // Tail erase to tail. + Tail EraseMode + } + + // Save saves the cursor position. + Save ANSI + + // Restore restores the cursor position. + Restore ANSI + + // Hide hides the cursor. + Hide ANSI + + // Show shows the cursor. + Show ANSI + + // Report reports the cursor position. + Report ANSI +) + +// Up moves up the cursor. +func Up(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dA", n)) +} + +// Down moves down the cursor. 
+func Down(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dB", n)) +} + +// Right moves right the cursor. +func Right(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dC", n)) +} + +// Left moves left the cursor. +func Left(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dD", n)) +} + +// NextLine moves down the cursor to head of a line. +func NextLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dE", n)) +} + +// PreviousLine moves up the cursor to head of a line. +func PreviousLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dF", n)) +} + +// Column set the cursor position to a given column. +func Column(col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dG", col)) +} + +// Position set the cursor position to a given absolute position. +func Position(row, col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%d;%dH", row, col)) +} + +// EraseDisplay erases display by given EraseMode. +func EraseDisplay(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dJ", m)) +} + +// EraseLine erases lines by given EraseMode. +func EraseLine(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dK", m)) +} + +// ScrollUp scrolls up the page. +func ScrollUp(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dS", n)) +} + +// ScrollDown scrolls down the page. +func ScrollDown(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dT", n)) +} + +func init() { + EraseModes = struct { + All EraseMode + Head EraseMode + Tail EraseMode + }{ + Tail: 0, + Head: 1, + All: 2, + } + + Save = newAnsi(esc + "s") + Restore = newAnsi(esc + "u") + Hide = newAnsi(esc + "?25l") + Show = newAnsi(esc + "?25h") + Report = newAnsi(esc + "6n") +} diff --git a/vendor/github.com/morikuni/aec/ansi.go b/vendor/github.com/morikuni/aec/ansi.go new file mode 100644 index 000000000..e60722e6e --- /dev/null +++ b/vendor/github.com/morikuni/aec/ansi.go @@ -0,0 +1,59 @@ +package aec + +import ( + "fmt" + "strings" +) + +const esc = "\x1b[" + +// Reset resets SGR effect. +const Reset string = "\x1b[0m" + +var empty = newAnsi("") + +// ANSI represents ANSI escape code. +type ANSI interface { + fmt.Stringer + + // With adapts given ANSIs. + With(...ANSI) ANSI + + // Apply wraps given string in ANSI. + Apply(string) string +} + +type ansiImpl string + +func newAnsi(s string) *ansiImpl { + r := ansiImpl(s) + return &r +} + +func (a *ansiImpl) With(ansi ...ANSI) ANSI { + return concat(append([]ANSI{a}, ansi...)) +} + +func (a *ansiImpl) Apply(s string) string { + return a.String() + s + Reset +} + +func (a *ansiImpl) String() string { + return string(*a) +} + +// Apply wraps given string in ANSIs. +func Apply(s string, ansi ...ANSI) string { + if len(ansi) == 0 { + return s + } + return concat(ansi).Apply(s) +} + +func concat(ansi []ANSI) ANSI { + strs := make([]string, 0, len(ansi)) + for _, p := range ansi { + strs = append(strs, p.String()) + } + return newAnsi(strings.Join(strs, "")) +} diff --git a/vendor/github.com/morikuni/aec/builder.go b/vendor/github.com/morikuni/aec/builder.go new file mode 100644 index 000000000..13bd002d4 --- /dev/null +++ b/vendor/github.com/morikuni/aec/builder.go @@ -0,0 +1,388 @@ +package aec + +// Builder is a lightweight syntax to construct customized ANSI. +type Builder struct { + ANSI ANSI +} + +// EmptyBuilder is an initialized Builder. 
+var EmptyBuilder *Builder + +// NewBuilder creates a Builder from existing ANSI. +func NewBuilder(a ...ANSI) *Builder { + return &Builder{concat(a)} +} + +// With is a syntax for With. +func (builder *Builder) With(a ...ANSI) *Builder { + return NewBuilder(builder.ANSI.With(a...)) +} + +// Up is a syntax for Up. +func (builder *Builder) Up(n uint) *Builder { + return builder.With(Up(n)) +} + +// Down is a syntax for Down. +func (builder *Builder) Down(n uint) *Builder { + return builder.With(Down(n)) +} + +// Right is a syntax for Right. +func (builder *Builder) Right(n uint) *Builder { + return builder.With(Right(n)) +} + +// Left is a syntax for Left. +func (builder *Builder) Left(n uint) *Builder { + return builder.With(Left(n)) +} + +// NextLine is a syntax for NextLine. +func (builder *Builder) NextLine(n uint) *Builder { + return builder.With(NextLine(n)) +} + +// PreviousLine is a syntax for PreviousLine. +func (builder *Builder) PreviousLine(n uint) *Builder { + return builder.With(PreviousLine(n)) +} + +// Column is a syntax for Column. +func (builder *Builder) Column(col uint) *Builder { + return builder.With(Column(col)) +} + +// Position is a syntax for Position. +func (builder *Builder) Position(row, col uint) *Builder { + return builder.With(Position(row, col)) +} + +// EraseDisplay is a syntax for EraseDisplay. +func (builder *Builder) EraseDisplay(m EraseMode) *Builder { + return builder.With(EraseDisplay(m)) +} + +// EraseLine is a syntax for EraseLine. +func (builder *Builder) EraseLine(m EraseMode) *Builder { + return builder.With(EraseLine(m)) +} + +// ScrollUp is a syntax for ScrollUp. +func (builder *Builder) ScrollUp(n int) *Builder { + return builder.With(ScrollUp(n)) +} + +// ScrollDown is a syntax for ScrollDown. +func (builder *Builder) ScrollDown(n int) *Builder { + return builder.With(ScrollDown(n)) +} + +// Save is a syntax for Save. +func (builder *Builder) Save() *Builder { + return builder.With(Save) +} + +// Restore is a syntax for Restore. +func (builder *Builder) Restore() *Builder { + return builder.With(Restore) +} + +// Hide is a syntax for Hide. +func (builder *Builder) Hide() *Builder { + return builder.With(Hide) +} + +// Show is a syntax for Show. +func (builder *Builder) Show() *Builder { + return builder.With(Show) +} + +// Report is a syntax for Report. +func (builder *Builder) Report() *Builder { + return builder.With(Report) +} + +// Bold is a syntax for Bold. +func (builder *Builder) Bold() *Builder { + return builder.With(Bold) +} + +// Faint is a syntax for Faint. +func (builder *Builder) Faint() *Builder { + return builder.With(Faint) +} + +// Italic is a syntax for Italic. +func (builder *Builder) Italic() *Builder { + return builder.With(Italic) +} + +// Underline is a syntax for Underline. +func (builder *Builder) Underline() *Builder { + return builder.With(Underline) +} + +// BlinkSlow is a syntax for BlinkSlow. +func (builder *Builder) BlinkSlow() *Builder { + return builder.With(BlinkSlow) +} + +// BlinkRapid is a syntax for BlinkRapid. +func (builder *Builder) BlinkRapid() *Builder { + return builder.With(BlinkRapid) +} + +// Inverse is a syntax for Inverse. +func (builder *Builder) Inverse() *Builder { + return builder.With(Inverse) +} + +// Conceal is a syntax for Conceal. +func (builder *Builder) Conceal() *Builder { + return builder.With(Conceal) +} + +// CrossOut is a syntax for CrossOut. +func (builder *Builder) CrossOut() *Builder { + return builder.With(CrossOut) +} + +// BlackF is a syntax for BlackF. 
+func (builder *Builder) BlackF() *Builder { + return builder.With(BlackF) +} + +// RedF is a syntax for RedF. +func (builder *Builder) RedF() *Builder { + return builder.With(RedF) +} + +// GreenF is a syntax for GreenF. +func (builder *Builder) GreenF() *Builder { + return builder.With(GreenF) +} + +// YellowF is a syntax for YellowF. +func (builder *Builder) YellowF() *Builder { + return builder.With(YellowF) +} + +// BlueF is a syntax for BlueF. +func (builder *Builder) BlueF() *Builder { + return builder.With(BlueF) +} + +// MagentaF is a syntax for MagentaF. +func (builder *Builder) MagentaF() *Builder { + return builder.With(MagentaF) +} + +// CyanF is a syntax for CyanF. +func (builder *Builder) CyanF() *Builder { + return builder.With(CyanF) +} + +// WhiteF is a syntax for WhiteF. +func (builder *Builder) WhiteF() *Builder { + return builder.With(WhiteF) +} + +// DefaultF is a syntax for DefaultF. +func (builder *Builder) DefaultF() *Builder { + return builder.With(DefaultF) +} + +// BlackB is a syntax for BlackB. +func (builder *Builder) BlackB() *Builder { + return builder.With(BlackB) +} + +// RedB is a syntax for RedB. +func (builder *Builder) RedB() *Builder { + return builder.With(RedB) +} + +// GreenB is a syntax for GreenB. +func (builder *Builder) GreenB() *Builder { + return builder.With(GreenB) +} + +// YellowB is a syntax for YellowB. +func (builder *Builder) YellowB() *Builder { + return builder.With(YellowB) +} + +// BlueB is a syntax for BlueB. +func (builder *Builder) BlueB() *Builder { + return builder.With(BlueB) +} + +// MagentaB is a syntax for MagentaB. +func (builder *Builder) MagentaB() *Builder { + return builder.With(MagentaB) +} + +// CyanB is a syntax for CyanB. +func (builder *Builder) CyanB() *Builder { + return builder.With(CyanB) +} + +// WhiteB is a syntax for WhiteB. +func (builder *Builder) WhiteB() *Builder { + return builder.With(WhiteB) +} + +// DefaultB is a syntax for DefaultB. +func (builder *Builder) DefaultB() *Builder { + return builder.With(DefaultB) +} + +// Frame is a syntax for Frame. +func (builder *Builder) Frame() *Builder { + return builder.With(Frame) +} + +// Encircle is a syntax for Encircle. +func (builder *Builder) Encircle() *Builder { + return builder.With(Encircle) +} + +// Overline is a syntax for Overline. +func (builder *Builder) Overline() *Builder { + return builder.With(Overline) +} + +// LightBlackF is a syntax for LightBlueF. +func (builder *Builder) LightBlackF() *Builder { + return builder.With(LightBlackF) +} + +// LightRedF is a syntax for LightRedF. +func (builder *Builder) LightRedF() *Builder { + return builder.With(LightRedF) +} + +// LightGreenF is a syntax for LightGreenF. +func (builder *Builder) LightGreenF() *Builder { + return builder.With(LightGreenF) +} + +// LightYellowF is a syntax for LightYellowF. +func (builder *Builder) LightYellowF() *Builder { + return builder.With(LightYellowF) +} + +// LightBlueF is a syntax for LightBlueF. +func (builder *Builder) LightBlueF() *Builder { + return builder.With(LightBlueF) +} + +// LightMagentaF is a syntax for LightMagentaF. +func (builder *Builder) LightMagentaF() *Builder { + return builder.With(LightMagentaF) +} + +// LightCyanF is a syntax for LightCyanF. +func (builder *Builder) LightCyanF() *Builder { + return builder.With(LightCyanF) +} + +// LightWhiteF is a syntax for LightWhiteF. +func (builder *Builder) LightWhiteF() *Builder { + return builder.With(LightWhiteF) +} + +// LightBlackB is a syntax for LightBlackB. 
+func (builder *Builder) LightBlackB() *Builder { + return builder.With(LightBlackB) +} + +// LightRedB is a syntax for LightRedB. +func (builder *Builder) LightRedB() *Builder { + return builder.With(LightRedB) +} + +// LightGreenB is a syntax for LightGreenB. +func (builder *Builder) LightGreenB() *Builder { + return builder.With(LightGreenB) +} + +// LightYellowB is a syntax for LightYellowB. +func (builder *Builder) LightYellowB() *Builder { + return builder.With(LightYellowB) +} + +// LightBlueB is a syntax for LightBlueB. +func (builder *Builder) LightBlueB() *Builder { + return builder.With(LightBlueB) +} + +// LightMagentaB is a syntax for LightMagentaB. +func (builder *Builder) LightMagentaB() *Builder { + return builder.With(LightMagentaB) +} + +// LightCyanB is a syntax for LightCyanB. +func (builder *Builder) LightCyanB() *Builder { + return builder.With(LightCyanB) +} + +// LightWhiteB is a syntax for LightWhiteB. +func (builder *Builder) LightWhiteB() *Builder { + return builder.With(LightWhiteB) +} + +// Color3BitF is a syntax for Color3BitF. +func (builder *Builder) Color3BitF(c RGB3Bit) *Builder { + return builder.With(Color3BitF(c)) +} + +// Color3BitB is a syntax for Color3BitB. +func (builder *Builder) Color3BitB(c RGB3Bit) *Builder { + return builder.With(Color3BitB(c)) +} + +// Color8BitF is a syntax for Color8BitF. +func (builder *Builder) Color8BitF(c RGB8Bit) *Builder { + return builder.With(Color8BitF(c)) +} + +// Color8BitB is a syntax for Color8BitB. +func (builder *Builder) Color8BitB(c RGB8Bit) *Builder { + return builder.With(Color8BitB(c)) +} + +// FullColorF is a syntax for FullColorF. +func (builder *Builder) FullColorF(r, g, b uint8) *Builder { + return builder.With(FullColorF(r, g, b)) +} + +// FullColorB is a syntax for FullColorB. +func (builder *Builder) FullColorB(r, g, b uint8) *Builder { + return builder.With(FullColorB(r, g, b)) +} + +// RGB3BitF is a syntax for Color3BitF with NewRGB3Bit. +func (builder *Builder) RGB3BitF(r, g, b uint8) *Builder { + return builder.Color3BitF(NewRGB3Bit(r, g, b)) +} + +// RGB3BitB is a syntax for Color3BitB with NewRGB3Bit. +func (builder *Builder) RGB3BitB(r, g, b uint8) *Builder { + return builder.Color3BitB(NewRGB3Bit(r, g, b)) +} + +// RGB8BitF is a syntax for Color8BitF with NewRGB8Bit. +func (builder *Builder) RGB8BitF(r, g, b uint8) *Builder { + return builder.Color8BitF(NewRGB8Bit(r, g, b)) +} + +// RGB8BitB is a syntax for Color8BitB with NewRGB8Bit. +func (builder *Builder) RGB8BitB(r, g, b uint8) *Builder { + return builder.Color8BitB(NewRGB8Bit(r, g, b)) +} + +func init() { + EmptyBuilder = &Builder{empty} +} diff --git a/vendor/github.com/morikuni/aec/sample.gif b/vendor/github.com/morikuni/aec/sample.gif new file mode 100644 index 000000000..c6c613bb7 Binary files /dev/null and b/vendor/github.com/morikuni/aec/sample.gif differ diff --git a/vendor/github.com/morikuni/aec/sgr.go b/vendor/github.com/morikuni/aec/sgr.go new file mode 100644 index 000000000..0ba3464e6 --- /dev/null +++ b/vendor/github.com/morikuni/aec/sgr.go @@ -0,0 +1,202 @@ +package aec + +import ( + "fmt" +) + +// RGB3Bit is a 3bit RGB color. +type RGB3Bit uint8 + +// RGB8Bit is a 8bit RGB color. +type RGB8Bit uint8 + +func newSGR(n uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", n)) +} + +// NewRGB3Bit create a RGB3Bit from given RGB. +func NewRGB3Bit(r, g, b uint8) RGB3Bit { + return RGB3Bit((r >> 7) | ((g >> 6) & 0x2) | ((b >> 5) & 0x4)) +} + +// NewRGB8Bit create a RGB8Bit from given RGB. 
+func NewRGB8Bit(r, g, b uint8) RGB8Bit { + return RGB8Bit(16 + 36*(r/43) + 6*(g/43) + b/43) +} + +// Color3BitF set the foreground color of text. +func Color3BitF(c RGB3Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", c+30)) +} + +// Color3BitB set the background color of text. +func Color3BitB(c RGB3Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", c+40)) +} + +// Color8BitF set the foreground color of text. +func Color8BitF(c RGB8Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"38;5;%dm", c)) +} + +// Color8BitB set the background color of text. +func Color8BitB(c RGB8Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"48;5;%dm", c)) +} + +// FullColorF set the foreground color of text. +func FullColorF(r, g, b uint8) ANSI { + return newAnsi(fmt.Sprintf(esc+"38;2;%d;%d;%dm", r, g, b)) +} + +// FullColorB set the foreground color of text. +func FullColorB(r, g, b uint8) ANSI { + return newAnsi(fmt.Sprintf(esc+"48;2;%d;%d;%dm", r, g, b)) +} + +// Style +var ( + // Bold set the text style to bold or increased intensity. + Bold ANSI + + // Faint set the text style to faint. + Faint ANSI + + // Italic set the text style to italic. + Italic ANSI + + // Underline set the text style to underline. + Underline ANSI + + // BlinkSlow set the text style to slow blink. + BlinkSlow ANSI + + // BlinkRapid set the text style to rapid blink. + BlinkRapid ANSI + + // Inverse swap the foreground color and background color. + Inverse ANSI + + // Conceal set the text style to conceal. + Conceal ANSI + + // CrossOut set the text style to crossed out. + CrossOut ANSI + + // Frame set the text style to framed. + Frame ANSI + + // Encircle set the text style to encircled. + Encircle ANSI + + // Overline set the text style to overlined. + Overline ANSI +) + +// Foreground color of text. +var ( + // DefaultF is the default color of foreground. + DefaultF ANSI + + // Normal color + BlackF ANSI + RedF ANSI + GreenF ANSI + YellowF ANSI + BlueF ANSI + MagentaF ANSI + CyanF ANSI + WhiteF ANSI + + // Light color + LightBlackF ANSI + LightRedF ANSI + LightGreenF ANSI + LightYellowF ANSI + LightBlueF ANSI + LightMagentaF ANSI + LightCyanF ANSI + LightWhiteF ANSI +) + +// Background color of text. +var ( + // DefaultB is the default color of background. 
+ DefaultB ANSI + + // Normal color + BlackB ANSI + RedB ANSI + GreenB ANSI + YellowB ANSI + BlueB ANSI + MagentaB ANSI + CyanB ANSI + WhiteB ANSI + + // Light color + LightBlackB ANSI + LightRedB ANSI + LightGreenB ANSI + LightYellowB ANSI + LightBlueB ANSI + LightMagentaB ANSI + LightCyanB ANSI + LightWhiteB ANSI +) + +func init() { + Bold = newSGR(1) + Faint = newSGR(2) + Italic = newSGR(3) + Underline = newSGR(4) + BlinkSlow = newSGR(5) + BlinkRapid = newSGR(6) + Inverse = newSGR(7) + Conceal = newSGR(8) + CrossOut = newSGR(9) + + BlackF = newSGR(30) + RedF = newSGR(31) + GreenF = newSGR(32) + YellowF = newSGR(33) + BlueF = newSGR(34) + MagentaF = newSGR(35) + CyanF = newSGR(36) + WhiteF = newSGR(37) + + DefaultF = newSGR(39) + + BlackB = newSGR(40) + RedB = newSGR(41) + GreenB = newSGR(42) + YellowB = newSGR(43) + BlueB = newSGR(44) + MagentaB = newSGR(45) + CyanB = newSGR(46) + WhiteB = newSGR(47) + + DefaultB = newSGR(49) + + Frame = newSGR(51) + Encircle = newSGR(52) + Overline = newSGR(53) + + LightBlackF = newSGR(90) + LightRedF = newSGR(91) + LightGreenF = newSGR(92) + LightYellowF = newSGR(93) + LightBlueF = newSGR(94) + LightMagentaF = newSGR(95) + LightCyanF = newSGR(96) + LightWhiteF = newSGR(97) + + LightBlackB = newSGR(100) + LightRedB = newSGR(101) + LightGreenB = newSGR(102) + LightYellowB = newSGR(103) + LightBlueB = newSGR(104) + LightMagentaB = newSGR(105) + LightCyanB = newSGR(106) + LightWhiteB = newSGR(107) +} diff --git a/vendor/github.com/openshift/imagebuilder/OWNERS b/vendor/github.com/openshift/imagebuilder/OWNERS index 6a900fd07..db859b7bd 100644 --- a/vendor/github.com/openshift/imagebuilder/OWNERS +++ b/vendor/github.com/openshift/imagebuilder/OWNERS @@ -1,5 +1,3 @@ -reviewers: -- bparees approvers: - TomSweeneyRedHat - mrunalp diff --git a/vendor/github.com/openshift/imagebuilder/README.md b/vendor/github.com/openshift/imagebuilder/README.md index fd96ed940..772747bce 100644 --- a/vendor/github.com/openshift/imagebuilder/README.md +++ b/vendor/github.com/openshift/imagebuilder/README.md @@ -64,6 +64,11 @@ $ imagebuilder -f Dockerfile:Dockerfile.extra . will build the current directory and combine the first Dockerfile with the second. The FROM in the second image is ignored. +Note that imagebuilder adds the built image to the `docker` daemon's internal storage. If you use `podman` you must first pull the image into its local registry: + +``` +$ podman pull docker-daemon:: # must contain either a tag or a digest +``` ## Code Example diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go index 86b139b65..5a2d0d539 100644 --- a/vendor/github.com/openshift/imagebuilder/builder.go +++ b/vendor/github.com/openshift/imagebuilder/builder.go @@ -212,6 +212,7 @@ func NewStages(node *parser.Node, b *Builder) (Stages, error) { Builder: &Builder{ Args: b.Args, AllowedArgs: b.AllowedArgs, + Env: b.Env, }, Node: root, }) @@ -436,7 +437,7 @@ func (b *Builder) FromImage(image *docker.Image, node *parser.Node) error { SplitChildren(node, command.From) b.RunConfig = *image.Config - b.Env = b.RunConfig.Env + b.Env = append(b.Env, b.RunConfig.Env...) 
b.RunConfig.Env = nil // Check to see if we have a default PATH, note that windows won't diff --git a/vendor/github.com/openshift/imagebuilder/constants.go b/vendor/github.com/openshift/imagebuilder/constants.go index 86cd2e5e2..7b41e5a49 100644 --- a/vendor/github.com/openshift/imagebuilder/constants.go +++ b/vendor/github.com/openshift/imagebuilder/constants.go @@ -4,10 +4,6 @@ const ( // in docker/system NoBaseImageSpecifier = "scratch" - // not yet part of our import - commandArg = "arg" - commandStopSignal = "stopsignal" - // in docker/system defaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ) diff --git a/vendor/github.com/openshift/imagebuilder/evaluator.go b/vendor/github.com/openshift/imagebuilder/evaluator.go index 1ea358451..1bbb25f2b 100644 --- a/vendor/github.com/openshift/imagebuilder/evaluator.go +++ b/vendor/github.com/openshift/imagebuilder/evaluator.go @@ -20,16 +20,16 @@ func ParseDockerfile(r io.Reader) (*parser.Node, error) { // Environment variable interpolation will happen on these statements only. var replaceEnvAllowed = map[string]bool{ - command.Env: true, - command.Label: true, - command.Add: true, - command.Copy: true, - command.Workdir: true, - command.Expose: true, - command.Volume: true, - command.User: true, - commandStopSignal: true, - commandArg: true, + command.Env: true, + command.Label: true, + command.Add: true, + command.Copy: true, + command.Workdir: true, + command.Expose: true, + command.Volume: true, + command.User: true, + command.StopSignal: true, + command.Arg: true, } // Certain commands are allowed to have their args split into more diff --git a/vendor/github.com/openshift/imagebuilder/vendor.conf b/vendor/github.com/openshift/imagebuilder/vendor.conf index e437b79c3..c3f7d1a6b 100644 --- a/vendor/github.com/openshift/imagebuilder/vendor.conf +++ b/vendor/github.com/openshift/imagebuilder/vendor.conf @@ -5,7 +5,6 @@ github.com/docker/go-connections 97c2040d34dfae1d1b1275fa3a78dbdd2f41cf7e github.com/docker/go-units 2fb04c6466a548a03cb009c5569ee1ab1e35398e github.com/fsouza/go-dockerclient openshift-4.0 https://github.com/openshift/go-dockerclient.git github.com/gogo/protobuf c5a62797aee0054613cc578653a16c6237fef080 -github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998 github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e github.com/Microsoft/go-winio 1a8911d1ed007260465c3bfbbc785ac6915a0bb8 github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512 @@ -18,3 +17,4 @@ github.com/sirupsen/logrus d7b6bf5e4d26448fd977d07d745a2a66097ddecb golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83 golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba +k8s.io/klog 8e90cee79f823779174776412c13478955131846 diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 000000000..9857fe53d --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. 
+package errgroup + +import ( + "context" + "sync" +) + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid and does not cancel on error. +type Group struct { + cancel func() + + wg sync.WaitGroup + + errOnce sync.Once + err error +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancel(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel() + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to return a non-nil error cancels the group; its error will be +// returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 65a99869e..300561370 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -36,8 +36,12 @@ github.com/buger/goterm # github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b github.com/checkpoint-restore/go-criu github.com/checkpoint-restore/go-criu/rpc +# github.com/containerd/containerd v1.3.0 +github.com/containerd/containerd/errdefs # github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc -github.com/containerd/continuity/pathdriver +github.com/containerd/continuity/fs +github.com/containerd/continuity/sysx +github.com/containerd/continuity/syscallx # github.com/containernetworking/cni v0.7.1 github.com/containernetworking/cni/pkg/types github.com/containernetworking/cni/pkg/types/current @@ -51,7 +55,7 @@ github.com/containernetworking/plugins/pkg/ip github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator github.com/containernetworking/plugins/pkg/utils/hwaddr github.com/containernetworking/plugins/plugins/ipam/host-local/backend -# github.com/containers/buildah v1.11.3 +# github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e github.com/containers/buildah github.com/containers/buildah/imagebuildah github.com/containers/buildah/pkg/chrootuser @@ -68,44 +72,44 @@ github.com/containers/buildah/pkg/cgroups github.com/containers/buildah/pkg/overlay github.com/containers/buildah/pkg/unshare github.com/containers/buildah/pkg/umask -# github.com/containers/image/v4 v4.0.1 -github.com/containers/image/v4/directory -github.com/containers/image/v4/docker -github.com/containers/image/v4/docker/archive -github.com/containers/image/v4/manifest -github.com/containers/image/v4/pkg/docker/config -github.com/containers/image/v4/signature -github.com/containers/image/v4/transports -github.com/containers/image/v4/transports/alltransports -github.com/containers/image/v4/types -github.com/containers/image/v4/oci/archive -github.com/containers/image/v4/storage -github.com/containers/image/v4/copy -github.com/containers/image/v4/docker/reference -github.com/containers/image/v4/docker/tarfile -github.com/containers/image/v4/oci/layout 
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 65a99869e..300561370 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -36,8 +36,12 @@ github.com/buger/goterm
 # github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
 github.com/checkpoint-restore/go-criu
 github.com/checkpoint-restore/go-criu/rpc
+# github.com/containerd/containerd v1.3.0
+github.com/containerd/containerd/errdefs
 # github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc
-github.com/containerd/continuity/pathdriver
+github.com/containerd/continuity/fs
+github.com/containerd/continuity/sysx
+github.com/containerd/continuity/syscallx
 # github.com/containernetworking/cni v0.7.1
 github.com/containernetworking/cni/pkg/types
 github.com/containernetworking/cni/pkg/types/current
@@ -51,7 +55,7 @@ github.com/containernetworking/plugins/pkg/ip
 github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
 github.com/containernetworking/plugins/pkg/utils/hwaddr
 github.com/containernetworking/plugins/plugins/ipam/host-local/backend
-# github.com/containers/buildah v1.11.3
+# github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e
 github.com/containers/buildah
 github.com/containers/buildah/imagebuildah
 github.com/containers/buildah/pkg/chrootuser
@@ -68,44 +72,44 @@ github.com/containers/buildah/pkg/cgroups
 github.com/containers/buildah/pkg/overlay
 github.com/containers/buildah/pkg/unshare
 github.com/containers/buildah/pkg/umask
-# github.com/containers/image/v4 v4.0.1
-github.com/containers/image/v4/directory
-github.com/containers/image/v4/docker
-github.com/containers/image/v4/docker/archive
-github.com/containers/image/v4/manifest
-github.com/containers/image/v4/pkg/docker/config
-github.com/containers/image/v4/signature
-github.com/containers/image/v4/transports
-github.com/containers/image/v4/transports/alltransports
-github.com/containers/image/v4/types
-github.com/containers/image/v4/oci/archive
-github.com/containers/image/v4/storage
-github.com/containers/image/v4/copy
-github.com/containers/image/v4/docker/reference
-github.com/containers/image/v4/docker/tarfile
-github.com/containers/image/v4/oci/layout
-github.com/containers/image/v4/tarball
-github.com/containers/image/v4/pkg/sysregistriesv2
-github.com/containers/image/v4/image
-github.com/containers/image/v4/directory/explicitfilepath
-github.com/containers/image/v4/docker/policyconfiguration
-github.com/containers/image/v4/pkg/blobinfocache/none
-github.com/containers/image/v4/pkg/tlsclientconfig
-github.com/containers/image/v4/pkg/compression
-github.com/containers/image/v4/pkg/strslice
-github.com/containers/image/v4/internal/pkg/keyctl
-github.com/containers/image/v4/version
-github.com/containers/image/v4/docker/daemon
-github.com/containers/image/v4/openshift
-github.com/containers/image/v4/ostree
-github.com/containers/image/v4/pkg/compression/types
-github.com/containers/image/v4/internal/tmpdir
-github.com/containers/image/v4/oci/internal
-github.com/containers/image/v4/pkg/blobinfocache
-github.com/containers/image/v4/pkg/compression/internal
-github.com/containers/image/v4/pkg/blobinfocache/boltdb
-github.com/containers/image/v4/pkg/blobinfocache/memory
-github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize
+# github.com/containers/image/v5 v5.0.0
+github.com/containers/image/v5/directory
+github.com/containers/image/v5/docker
+github.com/containers/image/v5/docker/archive
+github.com/containers/image/v5/manifest
+github.com/containers/image/v5/pkg/docker/config
+github.com/containers/image/v5/signature
+github.com/containers/image/v5/transports
+github.com/containers/image/v5/transports/alltransports
+github.com/containers/image/v5/types
+github.com/containers/image/v5/oci/archive
+github.com/containers/image/v5/storage
+github.com/containers/image/v5/copy
+github.com/containers/image/v5/docker/reference
+github.com/containers/image/v5/docker/tarfile
+github.com/containers/image/v5/oci/layout
+github.com/containers/image/v5/tarball
+github.com/containers/image/v5/pkg/sysregistriesv2
+github.com/containers/image/v5/image
+github.com/containers/image/v5/directory/explicitfilepath
+github.com/containers/image/v5/docker/policyconfiguration
+github.com/containers/image/v5/pkg/blobinfocache/none
+github.com/containers/image/v5/pkg/tlsclientconfig
+github.com/containers/image/v5/pkg/compression
+github.com/containers/image/v5/pkg/strslice
+github.com/containers/image/v5/internal/pkg/keyctl
+github.com/containers/image/v5/version
+github.com/containers/image/v5/docker/daemon
+github.com/containers/image/v5/openshift
+github.com/containers/image/v5/ostree
+github.com/containers/image/v5/pkg/compression/types
+github.com/containers/image/v5/internal/tmpdir
+github.com/containers/image/v5/oci/internal
+github.com/containers/image/v5/pkg/blobinfocache
+github.com/containers/image/v5/pkg/compression/internal
+github.com/containers/image/v5/pkg/blobinfocache/boltdb
+github.com/containers/image/v5/pkg/blobinfocache/memory
+github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize
 # github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b
 github.com/containers/libtrust
 # github.com/containers/psgo v1.3.2
@@ -183,7 +187,7 @@ github.com/docker/distribution/registry/client/transport
 github.com/docker/distribution/registry/storage/cache
 github.com/docker/distribution/registry/storage/cache/memory
 github.com/docker/distribution/metrics
-# github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b
+# github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce
 github.com/docker/docker/pkg/signal
 github.com/docker/docker/pkg/homedir
 github.com/docker/docker/oci/caps
@@ -193,7 +197,6 @@ github.com/docker/docker/pkg/ioutils
 github.com/docker/docker/profiles/seccomp
 github.com/docker/docker/pkg/parsers
 github.com/docker/docker/api/types/versions
-github.com/docker/docker/pkg/idtools
 github.com/docker/docker/errdefs
 github.com/docker/docker/pkg/term/windows
 github.com/docker/docker/pkg/longpath
@@ -201,7 +204,9 @@ github.com/docker/docker/api/types
 github.com/docker/docker/pkg/parsers/kernel
 github.com/docker/docker/api/types/registry
 github.com/docker/docker/api/types/swarm
+github.com/docker/docker/pkg/archive
 github.com/docker/docker/pkg/fileutils
+github.com/docker/docker/pkg/jsonmessage
 github.com/docker/docker/pkg/stdcopy
 github.com/docker/docker/pkg/system
 github.com/docker/docker/client
@@ -210,6 +215,7 @@ github.com/docker/docker/api/types/filters
 github.com/docker/docker/api/types/mount
 github.com/docker/docker/api/types/network
 github.com/docker/docker/api/types/swarm/runtime
+github.com/docker/docker/pkg/idtools
 github.com/docker/docker/pkg/pools
 github.com/docker/docker/pkg/mount
 github.com/docker/docker/api
@@ -243,11 +249,8 @@ github.com/etcd-io/bbolt
 github.com/fatih/camelcase
 # github.com/fsnotify/fsnotify v1.4.7
 github.com/fsnotify/fsnotify
-# github.com/fsouza/go-dockerclient v1.4.4
+# github.com/fsouza/go-dockerclient v1.5.0
 github.com/fsouza/go-dockerclient
-github.com/fsouza/go-dockerclient/internal/archive
-github.com/fsouza/go-dockerclient/internal/jsonmessage
-github.com/fsouza/go-dockerclient/internal/term
 # github.com/ghodss/yaml v1.0.0
 github.com/ghodss/yaml
 # github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f
@@ -277,8 +280,6 @@ github.com/hpcloud/tail/ratelimiter
 github.com/hpcloud/tail/util
 github.com/hpcloud/tail/watch
 github.com/hpcloud/tail/winfile
-# github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd
-github.com/ijc/Gotty
 # github.com/imdario/mergo v0.3.7
 github.com/imdario/mergo
 # github.com/inconshreveable/mousetrap v1.0.0
@@ -312,6 +313,8 @@ github.com/mistifyio/go-zfs
 github.com/modern-go/concurrent
 # github.com/modern-go/reflect2 v1.0.1
 github.com/modern-go/reflect2
+# github.com/morikuni/aec v1.0.0
+github.com/morikuni/aec
 # github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
 github.com/mrunalp/fileutils
 # github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c
@@ -384,7 +387,7 @@ github.com/opencontainers/selinux/go-selinux/label
 github.com/opencontainers/selinux/go-selinux
 # github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible
 github.com/openshift/api/config/v1
-# github.com/openshift/imagebuilder v1.1.0
+# github.com/openshift/imagebuilder v1.1.1
 github.com/openshift/imagebuilder
 github.com/openshift/imagebuilder/dockerfile/parser
 github.com/openshift/imagebuilder/dockerfile/command
@@ -490,7 +493,7 @@ github.com/xeipuuv/gojsonpointer
 github.com/xeipuuv/gojsonreference
 # github.com/xeipuuv/gojsonschema v1.1.0
 github.com/xeipuuv/gojsonschema
-# golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
+# golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad
 golang.org/x/crypto/ssh/terminal
 golang.org/x/crypto/openpgp
 golang.org/x/crypto/openpgp/armor
@@ -516,6 +519,7 @@ golang.org/x/oauth2
 golang.org/x/oauth2/internal
 # golang.org/x/sync v0.0.0-20190423024810-112230192c58
 golang.org/x/sync/semaphore
+golang.org/x/sync/errgroup
 # golang.org/x/sys v0.0.0-20190902133755-9109b7679e13
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-- 
cgit v1.2.3-54-g00ecf
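The modules.txt changes above track a major-version bump, so consumers import the new paths under Go semantic import versioning: the module path gains a /v5 element while package names stay the same. A sketch of what a caller's import looks like after the bump (the quay.io image name is a made-up example; ParseImageName and Transport().Name() have the same shape in v4 and v5):

    package main

    import (
    	"fmt"

    	// previously: github.com/containers/image/v4/transports/alltransports
    	"github.com/containers/image/v5/transports/alltransports"
    )

    func main() {
    	ref, err := alltransports.ParseImageName("docker://quay.io/example/image:latest")
    	if err != nil {
    		fmt.Println("parse failed:", err)
    		return
    	}
    	fmt.Println("transport:", ref.Transport().Name())
    }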